]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - ubuntu/vbox/vboxguest/VBoxGuest.c
UBUNTU: ubuntu: vbox -- update to 5.2.6-dfsg-1
[mirror_ubuntu-bionic-kernel.git] / ubuntu / vbox / vboxguest / VBoxGuest.c
CommitLineData
056a1eb7
SF
1/* $Id: VBoxGuest.cpp $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
26894aac 7 * Copyright (C) 2007-2017 Oracle Corporation
056a1eb7
SF
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
37 * though it must occationally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
43 * restrict set of functions. These are generally referred to as 'vboxguest'
44 * and 'vboxuser' respectively. Currently, this two device approach is only
45 * implemented on Linux!
46 *
47 */
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <iprt/mem.h>
58#include <iprt/time.h>
59#include <iprt/memobj.h>
60#include <iprt/asm.h>
61#include <iprt/asm-amd64-x86.h>
62#include <iprt/string.h>
63#include <iprt/process.h>
64#include <iprt/assert.h>
65#include <iprt/param.h>
66#include <iprt/timer.h>
67#ifdef VBOX_WITH_HGCM
68# include <iprt/thread.h>
69#endif
70#include "version-generated.h"
71#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
72# include "revision-generated.h"
73#endif
056a1eb7
SF
74#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
75# include <iprt/rand.h>
76#endif
77
78
79/*********************************************************************************************************************************
80* Defined Constants And Macros *
81*********************************************************************************************************************************/
82#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88#ifdef VBOX_WITH_HGCM
89static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
90#endif
91static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
92static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
93static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
94static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
95static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
96static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
97static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
98 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
99static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
100 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
101static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
6d209b23
SF
102 uint32_t fOrMask, uint32_t fNoMask,
103 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
104static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
105 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
056a1eb7
SF
106static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
107
108
109/*********************************************************************************************************************************
110* Global Variables *
111*********************************************************************************************************************************/
/** Size of a VMMDevChangeMemBalloon request covering one full chunk's worth of
 * page addresses; pre-computed once since the ballooning code reuses it for
 * every inflate/deflate request. */
static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IRPT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL    /* terminator so consumers can scan the table */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
135
136
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so use a small stack to perform this operation.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglR0GRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglR0GRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];                     /* failed attempts kept alive until the loop ends */
        uint32_t    iTry;
        bool        fBitched = false;               /* true once a failure has already been logged */
        Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Fall back to page alignment with extra slack for manual 4MB alignment below. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            /*
             * If both RTR0MemObjReserveKernel calls above failed because either not supported or
             * not implemented at all at the current platform, try to map the memory object into the
             * virtual kernel space.
             */
            if (rc == VERR_NOT_SUPPORTED)
            {
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    /* Created once, then reused on subsequent retries. */
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
            if (    uAlignment == PAGE_SIZE
                &&  pReq->hypervisorStart & (_4M - 1))
                /* Round the page-aligned address up to 4MB; the extra _4M reserved above covers this. */
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* Host rejected this address; park the object and try another. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (    RT_FAILURE(rc)
            &&  hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglR0GRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
275
276
277/**
278 * Undo what vgdrvInitFixateGuestMappings did.
279 *
280 * @param pDevExt The device extension.
281 */
282static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
283{
284 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
285 {
286 /*
287 * Tell the host that we're going to free the memory we reserved for
288 * it, the free it up. (Leak the memory if anything goes wrong here.)
289 */
290 VMMDevReqHypervisorInfo *pReq;
6d209b23 291 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
056a1eb7
SF
292 if (RT_SUCCESS(rc))
293 {
294 pReq->hypervisorStart = 0;
295 pReq->hypervisorSize = 0;
6d209b23
SF
296 rc = VbglR0GRPerform(&pReq->header);
297 VbglR0GRFree(&pReq->header);
056a1eb7
SF
298 }
299 if (RT_SUCCESS(rc))
300 {
301 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
302 AssertRC(rc);
303 }
304 else
305 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
306
307 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
308 }
309}
310
311
312
313/**
314 * Report the guest information to the host.
315 *
316 * @returns IPRT status code.
317 * @param enmOSType The OS type to report.
318 */
319static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
320{
321 /*
322 * Allocate and fill in the two guest info reports.
323 */
324 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
325 VMMDevReportGuestInfo *pReqInfo1 = NULL;
6d209b23
SF
326 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
327 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
056a1eb7
SF
328 if (RT_SUCCESS(rc))
329 {
330 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
331 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
332 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
333 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
334 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
335 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
336
6d209b23
SF
337 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
338 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
056a1eb7
SF
339 if (RT_SUCCESS(rc))
340 {
341 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
342 pReqInfo1->guestInfo.osType = enmOSType;
343
344 /*
345 * There are two protocols here:
346 * 1. Info2 + Info1. Supported by >=3.2.51.
347 * 2. Info1 and optionally Info2. The old protocol.
348 *
349 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
350 * if not supported by the VMMDev (message ordering requirement).
351 */
6d209b23
SF
352 rc = VbglR0GRPerform(&pReqInfo2->header);
353 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
056a1eb7
SF
354 if (RT_SUCCESS(rc))
355 {
6d209b23
SF
356 rc = VbglR0GRPerform(&pReqInfo1->header);
357 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
056a1eb7
SF
358 }
359 else if ( rc == VERR_NOT_SUPPORTED
360 || rc == VERR_NOT_IMPLEMENTED)
361 {
6d209b23
SF
362 rc = VbglR0GRPerform(&pReqInfo1->header);
363 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
056a1eb7
SF
364 if (RT_SUCCESS(rc))
365 {
6d209b23
SF
366 rc = VbglR0GRPerform(&pReqInfo2->header);
367 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
056a1eb7
SF
368 if (rc == VERR_NOT_IMPLEMENTED)
369 rc = VINF_SUCCESS;
370 }
371 }
6d209b23 372 VbglR0GRFree(&pReqInfo1->header);
056a1eb7 373 }
6d209b23 374 VbglR0GRFree(&pReqInfo2->header);
056a1eb7
SF
375 }
376
377 return rc;
378}
379
380
381/**
382 * Report the guest driver status to the host.
383 *
384 * @returns IPRT status code.
385 * @param fActive Flag whether the driver is now active or not.
386 */
387static int vgdrvReportDriverStatus(bool fActive)
388{
389 /*
390 * Report guest status of the VBox driver to the host.
391 */
392 VMMDevReportGuestStatus *pReq2 = NULL;
6d209b23
SF
393 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
394 Log(("vgdrvReportDriverStatus: VbglR0GRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
056a1eb7
SF
395 if (RT_SUCCESS(rc))
396 {
397 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
398 pReq2->guestStatus.status = fActive ?
399 VBoxGuestFacilityStatus_Active
400 : VBoxGuestFacilityStatus_Inactive;
401 pReq2->guestStatus.flags = 0;
6d209b23
SF
402 rc = VbglR0GRPerform(&pReq2->header);
403 Log(("vgdrvReportDriverStatus: VbglR0GRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
056a1eb7
SF
404 fActive ? 1 : 0, rc));
405 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
406 rc = VINF_SUCCESS;
6d209b23 407 VbglR0GRFree(&pReq2->header);
056a1eb7
SF
408 }
409
410 return rc;
411}
412
413
414/** @name Memory Ballooning
415 * @{
416 */
417
418/**
419 * Inflate the balloon by one chunk represented by an R0 memory object.
420 *
421 * The caller owns the balloon mutex.
422 *
423 * @returns IPRT status code.
424 * @param pMemObj Pointer to the R0 memory object.
425 * @param pReq The pre-allocated request for performing the VMMDev call.
426 */
427static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
428{
429 uint32_t iPage;
430 int rc;
431
432 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
433 {
434 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
435 pReq->aPhysPage[iPage] = phys;
436 }
437
438 pReq->fInflate = true;
439 pReq->header.size = g_cbChangeMemBalloonReq;
440 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
441
6d209b23 442 rc = VbglR0GRPerform(&pReq->header);
056a1eb7 443 if (RT_FAILURE(rc))
6d209b23 444 LogRel(("vgdrvBalloonInflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
056a1eb7
SF
445 return rc;
446}
447
448
449/**
450 * Deflate the balloon by one chunk - info the host and free the memory object.
451 *
452 * The caller owns the balloon mutex.
453 *
454 * @returns IPRT status code.
455 * @param pMemObj Pointer to the R0 memory object.
456 * The memory object will be freed afterwards.
457 * @param pReq The pre-allocated request for performing the VMMDev call.
458 */
459static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
460{
461 uint32_t iPage;
462 int rc;
463
464 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
465 {
466 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
467 pReq->aPhysPage[iPage] = phys;
468 }
469
470 pReq->fInflate = false;
471 pReq->header.size = g_cbChangeMemBalloonReq;
472 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
473
6d209b23 474 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
475 if (RT_FAILURE(rc))
476 {
6d209b23 477 LogRel(("vgdrvBalloonDeflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
056a1eb7
SF
478 return rc;
479 }
480
481 rc = RTR0MemObjFree(*pMemObj, true);
482 if (RT_FAILURE(rc))
483 {
484 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
485 return rc;
486 }
487
488 *pMemObj = NIL_RTR0MEMOBJ;
489 return VINF_SUCCESS;
490}
491
492
/**
 * Inflate/deflate the memory balloon and notify the host.
 *
 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   cBalloonChunks  The new size of the balloon in chunks of 1MB.
 * @param   pfHandleInR3    Where to return the handle-in-ring3 indicator
 *                          (VINF_SUCCESS if set).
 */
static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
{
    int rc = VINF_SUCCESS;

    if (pDevExt->MemBalloon.fUseKernelAPI)
    {
        VMMDevChangeMemBalloon *pReq;
        uint32_t i;

        /* Reject sizes beyond what the host told us it supports. */
        if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
        {
            LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
                    cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): comparing against cMaxChunks rather than cChunks for a
         * "nothing to do" short-circuit looks suspicious — confirm against
         * upstream before changing. */
        if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
            return VINF_SUCCESS;   /* nothing to do */

        /* Lazily allocate the (zero-initialized) array of chunk memory objects. */
        if (   cBalloonChunks > pDevExt->MemBalloon.cChunks
            && !pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
        }

        /* One request buffer is reused for every chunk transition below. */
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
        if (RT_FAILURE(rc))
            return rc;

        if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
        {
            /* inflate */
            for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
            {
                rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
                                           VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_NOT_SUPPORTED)
                    {
                        /* not supported -- fall back to the R3-allocated memory. */
                        rc = VINF_SUCCESS;
                        pDevExt->MemBalloon.fUseKernelAPI = false;
                        Assert(pDevExt->MemBalloon.cChunks == 0);
                        Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
                    }
                    /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
                     *      cannot allocate more memory => don't try further, just stop here */
                    /* else: XXX what else can fail?  VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
                    break;
                }

                rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    /* Host rejected the chunk; free it again and stop growing. */
                    Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                    RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    break;
                }
                pDevExt->MemBalloon.cChunks++;
            }
        }
        else
        {
            /* deflate */
            for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
            {
                rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
                    break;
                }
                pDevExt->MemBalloon.cChunks--;
            }
        }

        VbglR0GRFree(&pReq->header);
    }

    /*
     * Set the handle-in-ring3 indicator.  When set Ring-3 will have to work
     * the balloon changes via the other API.
     */
    *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;

    return rc;
}
598
599
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pvChunk         The address of the chunk to add to / remove from the
 *                          balloon. (user space address)
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    PRTR0MEMOBJ pMemObj = NULL;
    int rc = VINF_SUCCESS;
    uint32_t i;
    RT_NOREF1(pSession);

    if (fInflate)
    {
        /* Refuse to grow past the host-announced maximum (or before it was queried). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily create the chunk-object array; all slots start out NIL. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            /* Deflate targets exactly this registered chunk. */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / default the balloon as requested.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the user-space chunk so the host can take the pages. */
        rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflation failed; unlock the pages again and clear the slot. */
                Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglR0GRFree(&pReq->header);
    return rc;
}
716
717
718/**
719 * Cleanup the memory balloon of a session.
720 *
721 * Will request the balloon mutex, so it must be valid and the caller must not
722 * own it already.
723 *
724 * @param pDevExt The device extension.
725 * @param pSession The session. Can be NULL at unload.
726 */
727static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
728{
729 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
730 if ( pDevExt->MemBalloon.pOwner == pSession
731 || pSession == NULL /*unload*/)
732 {
733 if (pDevExt->MemBalloon.paMemObj)
734 {
735 VMMDevChangeMemBalloon *pReq;
6d209b23 736 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
056a1eb7
SF
737 if (RT_SUCCESS(rc))
738 {
739 uint32_t i;
740 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
741 {
742 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
743 if (RT_FAILURE(rc))
744 {
745 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
746 rc, pDevExt->MemBalloon.cChunks));
747 break;
748 }
749 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
750 pDevExt->MemBalloon.cChunks--;
751 }
6d209b23 752 VbglR0GRFree(&pReq->header);
056a1eb7
SF
753 }
754 else
755 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
756 rc, pDevExt->MemBalloon.cChunks));
757 RTMemFree(pDevExt->MemBalloon.paMemObj);
758 pDevExt->MemBalloon.paMemObj = NULL;
759 }
760
761 pDevExt->MemBalloon.pOwner = NULL;
762 }
763 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
764}
765
766/** @} */
767
768
769
770/** @name Heartbeat
771 * @{
772 */
773
774/**
775 * Sends heartbeat to host.
776 *
777 * @returns VBox status code.
778 */
779static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
780{
781 int rc;
782 if (pDevExt->pReqGuestHeartbeat)
783 {
6d209b23
SF
784 rc = VbglR0GRPerform(pDevExt->pReqGuestHeartbeat);
785 Log3(("vgdrvHeartbeatSend: VbglR0GRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
056a1eb7
SF
786 }
787 else
788 rc = VERR_INVALID_STATE;
789 return rc;
790}
791
792
793/**
794 * Callback for heartbeat timer.
795 */
796static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
797{
798 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
799 int rc;
800 AssertReturnVoid(pDevExt);
801
802 rc = vgdrvHeartbeatSend(pDevExt);
803 if (RT_FAILURE(rc))
804 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
805
806 NOREF(hTimer); NOREF(iTick);
807}
808
809
810/**
811 * Configure the host to check guest's heartbeat
812 * and get heartbeat interval from the host.
813 *
814 * @returns VBox status code.
815 * @param pDevExt The device extension.
816 * @param fEnabled Set true to enable guest heartbeat checks on host.
817 */
818static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
819{
820 VMMDevReqHeartbeat *pReq;
6d209b23
SF
821 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
822 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
056a1eb7
SF
823 if (RT_SUCCESS(rc))
824 {
825 pReq->fEnabled = fEnabled;
826 pReq->cNsInterval = 0;
6d209b23
SF
827 rc = VbglR0GRPerform(&pReq->header);
828 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
056a1eb7 829 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
6d209b23 830 VbglR0GRFree(&pReq->header);
056a1eb7
SF
831 }
832 return rc;
833}
834
835
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        /* Enabling also retrieves pDevExt->cNsHeartbeatInterval from the host. */
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglR0GRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglR0GRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Roll back the preallocated request on timer failure. */
                VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglR0GRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Something failed after enabling host-side checks: turn them off again. */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
893
894/** @} */
895
896
897/**
898 * Helper to reinit the VMMDev communication after hibernation.
899 *
900 * @returns VBox status code.
901 * @param pDevExt The device extension.
902 * @param enmOSType The OS type.
903 *
904 * @todo Call this on all platforms, not just windows.
905 */
906int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
907{
908 int rc = vgdrvReportGuestInfo(enmOSType);
909 if (RT_SUCCESS(rc))
910 {
911 rc = vgdrvReportDriverStatus(true /* Driver is active */);
912 if (RT_FAILURE(rc))
913 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
914 }
915 else
916 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
917 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
918 RT_NOREF1(pDevExt);
919 return rc;
920}
921
922
923/**
924 * Initializes the VBoxGuest device extension when the
925 * device driver is loaded.
926 *
 * The native code locates the VMMDev on the PCI bus and retrieves
928 * the MMIO and I/O port ranges, this function will take care of
929 * mapping the MMIO memory (if present). Upon successful return
930 * the native code should set up the interrupt handler.
931 *
932 * @returns VBox status code.
933 *
934 * @param pDevExt The device extension. Allocated by the native code.
935 * @param IOPortBase The base of the I/O port range.
936 * @param pvMMIOBase The base of the MMIO memory mapping.
937 * This is optional, pass NULL if not present.
938 * @param cbMMIO The size of the MMIO memory mapping.
939 * This is optional, pass 0 if not present.
940 * @param enmOSType The guest OS type to report to the VMMDev.
941 * @param fFixedEvents Events that will be enabled upon init and no client
942 * will ever be allowed to mask.
943 */
int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                          void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
                     RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     *        them. On linux we should use the module parameter stuff... */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    /* HGCM events must always be delivered when HGCM support is compiled in. */
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     *
     * Everything is set to a neutral/NIL value first so that the failure
     * paths below (and VGDrvCommonDeleteDevExt) can tell what was created.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    RTListInit(&pDevExt->SessionList);
    pDevExt->cSessions = 0;
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    pDevExt->pfnMouseNotifyCallback = NULL;
    pDevExt->pvMouseNotifyCallbackArg = NULL;
    pDevExt->pReqGuestHeartbeat = NULL;

    pDevExt->fFixedEvents = fFixedEvents;
    vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
    pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */

    vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
    pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */

    pDevExt->fAcquireModeGuestCaps = 0;
    pDevExt->fSetModeGuestCaps = 0;
    pDevExt->fAcquiredGuestCaps = 0;
    vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
    pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VGDrvCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VGDrvCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     * The spinlocks are interrupt safe because they are taken from the ISR path.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglR0InitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Preallocate the IRQ-acknowledge request; the ISR needs it ready-made. */
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglR0PhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vgdrvReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Set the fixed event and make sure the host doesn't have any lingering
                 * the guest capabilities or mouse status bits set.
                 */
                rc = vgdrvResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    rc = vgdrvResetCapabilitiesOnHost(pDevExt);
                    if (RT_SUCCESS(rc))
                    {
                        rc = vgdrvResetMouseStatusOnHost(pDevExt);
                        if (RT_SUCCESS(rc))
                        {
                            /*
                             * Initialize stuff which may fail without requiring the driver init to fail.
                             */
                            vgdrvInitFixateGuestMappings(pDevExt);
                            vgdrvHeartbeatInit(pDevExt);

                            /*
                             * Done!
                             */
                            rc = vgdrvReportDriverStatus(true /* Driver is active */);
                            if (RT_FAILURE(rc))
                                LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                            LogFlowFunc(("VGDrvCommonInitDevExt: returns success\n"));
                            return VINF_SUCCESS;
                        }
                        LogRel(("VGDrvCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
                    }
                    else
                        LogRel(("VGDrvCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VGDrvCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
            }
            else
                LogRel(("VGDrvCommonInitDevExt: vgdrvReportGuestInfo failed: rc=%Rrc\n", rc));
            VbglR0GRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VGDrvCommonInitDevExt: VbglR0GRAlloc failed: rc=%Rrc\n", rc));

        VbglR0TerminatePrimary();
    }
    else
        LogRel(("VGDrvCommonInitDevExt: VbglR0InitPrimary failed: rc=%Rrc\n", rc));

    /* Failure: undo the lock/mutex creation in reverse order of the success path. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
1136
1137
1138/**
1139 * Deletes all the items in a wait chain.
1140 * @param pList The head of the chain.
1141 */
1142static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1143{
1144 while (!RTListIsEmpty(pList))
1145 {
1146 int rc2;
1147 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1148 RTListNodeRemove(&pWait->ListNode);
1149
1150 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1151 pWait->Event = NIL_RTSEMEVENTMULTI;
1152 pWait->pSession = NULL;
1153 RTMemFree(pWait);
1154 }
1155}
1156
1157
1158/**
1159 * Destroys the VBoxGuest device extension.
1160 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
1163 *
1164 * @param pDevExt The device extension.
1165 */
void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VGDrvCommonDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Stop and destroy HB timer and
     * disable host heartbeat checking.
     */
    if (pDevExt->pHeartbeatTimer)
    {
        RTTimerDestroy(pDevExt->pHeartbeatTimer);
        vgdrvHeartbeatHostConfigure(pDevExt, false);
    }

    /* Free the preallocated heartbeat request (VbglR0GRFree tolerates NULL). */
    VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
    pDevExt->pReqGuestHeartbeat = NULL;

    /*
     * Clean up the bits that involves the host first.
     */
    vgdrvTermUnfixGuestMappings(pDevExt);
    if (!RTListIsEmpty(&pDevExt->SessionList))
    {
        /* Should not happen; reinit the list head so teardown can proceed. */
        LogRelFunc(("session list not empty!\n"));
        RTListInit(&pDevExt->SessionList);
    }
    /* Update the host flags (mouse status etc) not to reflect this session. */
    pDevExt->fFixedEvents = 0;
    vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
    vgdrvResetCapabilitiesOnHost(pDevExt);
    vgdrvResetMouseStatusOnHost(pDevExt);

    vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    /* Drain every wait-entry list, including the recycling free list. */
    vgdrvDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    vgdrvDeleteWaitList(&pDevExt->WakeUpList);
#endif
    vgdrvDeleteWaitList(&pDevExt->WokenUpList);
    vgdrvDeleteWaitList(&pDevExt->FreeList);

    VbglR0TerminatePrimary();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

}
1232
1233
1234/**
1235 * Creates a VBoxGuest user session.
1236 *
1237 * The native code calls this when a ring-3 client opens the device.
1238 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1239 *
1240 * @returns VBox status code.
1241 * @param pDevExt The device extension.
1242 * @param ppSession Where to store the session on success.
1243 */
1244int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1245{
1246 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1247 if (RT_UNLIKELY(!pSession))
1248 {
1249 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1250 return VERR_NO_MEMORY;
1251 }
1252
1253 pSession->Process = RTProcSelf();
1254 pSession->R0Process = RTR0ProcHandleSelf();
1255 pSession->pDevExt = pDevExt;
1256 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1257 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1258 pDevExt->cSessions++;
1259 RTSpinlockRelease(pDevExt->SessionSpinlock);
1260
1261 *ppSession = pSession;
1262 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1263 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1264 return VINF_SUCCESS;
1265}
1266
1267
1268/**
1269 * Creates a VBoxGuest kernel session.
1270 *
1271 * The native code calls this when a ring-0 client connects to the device.
1272 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1273 *
1274 * @returns VBox status code.
1275 * @param pDevExt The device extension.
1276 * @param ppSession Where to store the session on success.
1277 */
1278int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1279{
1280 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1281 if (RT_UNLIKELY(!pSession))
1282 {
1283 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1284 return VERR_NO_MEMORY;
1285 }
1286
1287 pSession->Process = NIL_RTPROCESS;
1288 pSession->R0Process = NIL_RTR0PROCESS;
1289 pSession->pDevExt = pDevExt;
1290 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1291 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1292 pDevExt->cSessions++;
1293 RTSpinlockRelease(pDevExt->SessionSpinlock);
1294
1295 *ppSession = pSession;
1296 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1297 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1298 return VINF_SUCCESS;
1299}
1300
1301
1302/**
1303 * Closes a VBoxGuest session.
1304 *
1305 * @param pDevExt The device extension.
1306 * @param pSession The session to close (and free).
1307 */
void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
#ifdef VBOX_WITH_HGCM
    unsigned i;
#endif
    LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
             pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

    /* Unlink the session from the device extension first. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    RTListNodeRemove(&pSession->ListNode);
    pDevExt->cSessions--;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    /* Drop everything this session contributed to the host-visible state:
       acquired capabilities, set capabilities, event filter and mouse status.
       fSessionTermination=true tells the workers not to fail on a dying session. */
    vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
    vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
                                NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
    vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
    vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);

    /* Kick any thread still blocked in a wait-event ioctl for this session. */
    vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);

#ifdef VBOX_WITH_HGCM
    /* Disconnect all HGCM clients the session still has open; the id is
       cleared before the (possibly blocking) disconnect call. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            uint32_t idClient = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
            VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Scrub and free the session structure (balloon ownership released first). */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vgdrvCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
1345
1346
1347/**
1348 * Allocates a wait-for-event entry.
1349 *
1350 * @returns The wait-for-event entry.
1351 * @param pDevExt The device extension.
1352 * @param pSession The session that's allocating this. Can be NULL.
1353 */
static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     *
     * The first RTListGetFirst is an unlocked peek to avoid taking the
     * spinlock when the free list is obviously empty; the real pop is
     * re-checked under the spinlock.
     */
    PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);

        pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
    if (!pWait)
    {
        /* Free list was empty (or got raced away): allocate a fresh entry. */
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may still be signalled from their previous use. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1409
1410
1411/**
1412 * Frees the wait-for-event entry.
1413 *
1414 * The caller must own the wait spinlock !
1415 * The entry must be in a list!
1416 *
1417 * @param pDevExt The device extension.
1418 * @param pWait The wait-for-event entry to free.
1419 */
static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    /* Scrub the masks so a recycled entry starts out clean. */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    if (pWait->fPendingWakeUp)
        /* A wake-up is in flight on another CPU (see VGDrvCommonWaitDoWakeUps);
           flag the entry so that code recycles it once the signal is done. */
        pWait->fFreeMe = true;
    else
#endif
    {
        /* Move the entry from whatever list it is on to the free list. */
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1438
1439
1440/**
1441 * Frees the wait-for-event entry.
1442 *
1443 * @param pDevExt The device extension.
1444 * @param pWait The wait-for-event entry to free.
1445 */
static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    /* Take the event spinlock that vgdrvWaitFreeLocked requires its caller to own. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);
}
1452
1453
1454#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1455/**
1456 * Processes the wake-up list.
1457 *
1458 * All entries in the wake-up list gets signalled and moved to the woken-up
1459 * list.
1460 * At least on Windows this function can be invoked concurrently from
1461 * different VCPUs. So, be thread-safe.
1462 *
1463 * @param pDevExt The device extension.
1464 */
void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness peek: cheap fast path when there is nothing to do. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Prevent other threads from accessing pWait when spinlock is released. */
            RTListNodeRemove(&pWait->ListNode);

            /* fPendingWakeUp tells vgdrvWaitFreeLocked that the entry is still in
               use here while we signal outside the spinlock. */
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            /* Signalling may not be done while holding an interrupt-safe spinlock. */
            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            pWait->fPendingWakeUp = false;
            if (RT_LIKELY(!pWait->fFreeMe))
            { /* likely */ }
            else
            {
                /* The waiter freed the entry while we were signalling; finish the
                   deferred free now that we own the lock again. */
                pWait->fFreeMe = false;
                vgdrvWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
1500#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1501
1502
1503/**
1504 * Implements the fast (no input or output) type of IOCtls.
1505 *
1506 * This is currently just a placeholder stub inherited from the support driver code.
1507 *
1508 * @returns VBox status code.
1509 * @param iFunction The IOCtl function number.
1510 * @param pDevExt The device extension.
1511 * @param pSession The session.
1512 */
6d209b23 1513int VGDrvCommonIoCtlFast(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
056a1eb7
SF
1514{
1515 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1516
1517 NOREF(iFunction);
1518 NOREF(pDevExt);
1519 NOREF(pSession);
1520 return VERR_NOT_SUPPORTED;
1521}
1522
1523
1524/**
6d209b23
SF
1525 * Gets the driver I/O control interface version, maybe adjusting it for
1526 * backwards compatibility.
1527 *
1528 * The adjusting is currently not implemented as we only have one major I/O
1529 * control interface version out there to support. This is something we will
1530 * implement as needed.
056a1eb7
SF
1531 *
1532 * returns IPRT status code.
1533 * @param pDevExt The device extension.
6d209b23
SF
1534 * @param pSession The session.
1535 * @param pReq The request info.
056a1eb7 1536 */
6d209b23 1537static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
056a1eb7 1538{
6d209b23
SF
1539 int rc;
1540 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1541 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1542 RT_NOREF2(pDevExt, pSession);
1543
1544 /*
1545 * Input validation.
1546 */
1547 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1548 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1549 {
1550 /*
1551 * Match the version.
1552 * The current logic is very simple, match the major interface version.
1553 */
1554 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1555 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1556 rc = VINF_SUCCESS;
1557 else
1558 {
1559 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1560 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1561 rc = VERR_VERSION_MISMATCH;
1562 }
1563 }
1564 else
1565 {
1566 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1567 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1568 rc = VERR_INVALID_PARAMETER;
1569 }
056a1eb7 1570
6d209b23
SF
1571 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1572 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1573 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1574 pReq->u.Out.uReserved1 = 0;
1575 pReq->u.Out.uReserved2 = 0;
1576 return rc;
1577}
1578
1579
1580/**
1581 * Similar to vgdrvIoCtl_DriverVersionInfo, except its for IDC.
1582 *
1583 * returns IPRT status code.
1584 * @param pDevExt The device extension.
1585 * @param pSession The session.
1586 * @param pReq The request info.
1587 */
1588static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
1589{
1590 int rc;
1591 LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
1592 pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
1593 Assert(pSession != NULL);
1594 RT_NOREF(pDevExt);
1595
1596 /*
1597 * Input validation.
1598 */
1599 if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
1600 {
1601 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1602 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1603 {
1604 /*
1605 * Match the version.
1606 * The current logic is very simple, match the major interface version.
1607 */
1608 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1609 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1610 {
1611 pReq->u.Out.pvSession = pSession;
1612 pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
1613 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1614 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1615 pReq->u.Out.uReserved1 = 0;
1616 pReq->u.Out.pvReserved2 = NULL;
1617 return VINF_SUCCESS;
1618
1619 }
1620 LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1621 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1622 rc = VERR_VERSION_MISMATCH;
1623 }
1624 else
1625 {
1626 LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1627 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1628 rc = VERR_INVALID_PARAMETER;
1629 }
1630
1631 pReq->u.Out.pvSession = NULL;
1632 pReq->u.Out.uSessionVersion = UINT32_MAX;
1633 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1634 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1635 pReq->u.Out.uReserved1 = 0;
1636 pReq->u.Out.pvReserved2 = NULL;
1637 }
1638 else
1639 {
1640 LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1641 pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
1642 rc = VERR_INVALID_PARAMETER;
1643 }
1644 return rc;
1645}
1646
1647
1648/**
1649 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
1650 *
1651 * returns IPRT status code.
1652 * @param pDevExt The device extension.
1653 * @param pSession The session.
1654 * @param pReq The request info.
1655 */
1656static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
1657{
1658 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
1659 RT_NOREF(pDevExt);
1660 Assert(pSession != NULL);
1661
1662 if (pReq->u.In.pvSession == pSession)
1663 {
1664 VGDrvCommonCloseSession(pDevExt, pSession);
1665 return VINF_SUCCESS;
1666 }
1667 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
1668 return VERR_INVALID_PARAMETER;
1669}
1670
1671
1672/**
1673 * Return the VMM device I/O info.
1674 *
1675 * returns IPRT status code.
1676 * @param pDevExt The device extension.
1677 * @param pInfo The request info.
1678 * @note Ring-0 only, caller checked.
1679 */
1680static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
1681{
1682 LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));
1683
1684 pInfo->u.Out.IoPort = pDevExt->IOPortBase;
1685 pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
1686 pInfo->u.Out.auPadding[0] = 0;
1687#if HC_ARCH_BITS != 32
1688 pInfo->u.Out.auPadding[1] = 0;
1689 pInfo->u.Out.auPadding[2] = 0;
1690#endif
056a1eb7
SF
1691 return VINF_SUCCESS;
1692}
1693
1694
056a1eb7
SF
1695/**
1696 * Set the callback for the kernel mouse handler.
1697 *
1698 * returns IPRT status code.
1699 * @param pDevExt The device extension.
1700 * @param pNotify The new callback information.
1701 */
int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
{
    LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));

#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
    /* Platform code installs the callback itself when it may preempt us. */
    VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
#else
    /* Store callback + user argument under the event spinlock so the pair is
       updated atomically with respect to readers. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pDevExt->pfnMouseNotifyCallback = pNotify->u.In.pfnNotify;
    pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
    RTSpinlockRelease(pDevExt->EventSpinlock);
#endif
    return VINF_SUCCESS;
}
056a1eb7
SF
1716
1717
1718/**
1719 * Worker vgdrvIoCtl_WaitEvent.
1720 *
1721 * The caller enters the spinlock, we leave it.
1722 *
1723 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1724 */
1725DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
6d209b23 1726 PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
056a1eb7
SF
1727{
1728 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1729 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
1730 fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
1731 if (fMatches || pSession->fPendingCancelWaitEvents)
1732 {
1733 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1734 RTSpinlockRelease(pDevExt->EventSpinlock);
1735
6d209b23 1736 pInfo->u.Out.fEvents = fMatches;
056a1eb7 1737 if (fReqEvents & ~((uint32_t)1 << iEvent))
6d209b23 1738 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
056a1eb7 1739 else
6d209b23 1740 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
056a1eb7
SF
1741 pSession->fPendingCancelWaitEvents = false;
1742 return VINF_SUCCESS;
1743 }
1744
1745 RTSpinlockRelease(pDevExt->EventSpinlock);
1746 return VERR_TIMEOUT;
1747}
1748
1749
6d209b23
SF
static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
{
    uint32_t const cMsTimeout = pInfo->u.In.cMsTimeOut;
    const uint32_t fReqEvents = pInfo->u.In.fEvents;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */

    /*
     * Copy and verify the input mask.
     * iEvent is the bit number of the lowest requested event (for log output).
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * Note: vbdgCheckWaitEventCondition releases the spinlock on both paths.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll only - no waiting. */
    if (!cMsTimeout)
    {
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    /* UINT32_MAX ms is treated as wait-forever. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * UINT32_MAX in fResEvents is the cancel marker set by the wake-up path.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u.Out.fEvents = fResEvents;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    else
    {
        /* A "successful" wait without any event recorded is an internal error. */
        if (RT_SUCCESS(rc))
        {
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1859
1860
6d209b23
SF
1861/** @todo the semantics of this IoCtl have been tightened, so that no calls to
1862 * VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
1863 * called. Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT made
1864 * after that to return VERR_INTERRUPTED or something appropriate. */
056a1eb7
SF
/**
 * Cancels all pending VBOXGUEST_IOCTL_WAITEVENT waits for the given session.
 *
 * Every wait entry belonging to @a pSession gets its result set to UINT32_MAX
 * (the "cancelled" marker checked by the wait-event ioctl) and is woken up.
 *
 * @returns VINF_SUCCESS (always).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters should be cancelled.
 */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was at least one WAITEVENT in process for this session?  If not we
     * set a flag that the next call should be interrupted immediately.  This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            pWait->fResEvents = UINT32_MAX; /* UINT32_MAX = cancelled, see the wait-event ioctl. */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Defer the actual semaphore signalling until the spinlock is released. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Now that we no longer hold the spinlock, do the deferred wake-ups. */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1909
1910
/**
 * Checks if the VMM request is allowed in the context of the given session.
 *
 * Categorizes the request type into a required trust level, then compares that
 * level against the session (ring-0 vs. user, trusted vs. regular user).
 *
 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   enmType     The request type.
 * @param   pReqHdr     The request.
 */
static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
                                       VMMDevRequestHeader const *pReqHdr)
{
    /*
     * Categorize the request being made.
     */
    /** @todo This need quite some more work! */
    enum
    {
        kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
    } enmRequired;
    RT_NOREF1(pDevExt);

    switch (enmType)
    {
        /*
         * Deny access to anything we don't know or provide specialized I/O controls for.
         */
#ifdef VBOX_WITH_HGCM
        case VMMDevReq_HGCMConnect:
        case VMMDevReq_HGCMDisconnect:
# ifdef VBOX_WITH_64_BITS_GUESTS
        case VMMDevReq_HGCMCall32:
        case VMMDevReq_HGCMCall64:
# else
        case VMMDevReq_HGCMCall:
# endif /* VBOX_WITH_64_BITS_GUESTS */
        case VMMDevReq_HGCMCancel:
        case VMMDevReq_HGCMCancel2:
#endif /* VBOX_WITH_HGCM */
        case VMMDevReq_SetGuestCapabilities:
        default:
            enmRequired = kLevel_NoOne;
            break;

        /*
         * There are a few things only this driver can do (and it doesn't use
         * the VMMRequst I/O control route anyway, but whatever).
         */
        case VMMDevReq_ReportGuestInfo:
        case VMMDevReq_ReportGuestInfo2:
        case VMMDevReq_GetHypervisorInfo:
        case VMMDevReq_SetHypervisorInfo:
        case VMMDevReq_RegisterPatchMemory:
        case VMMDevReq_DeregisterPatchMemory:
        case VMMDevReq_GetMemBalloonChangeRequest:
            enmRequired = kLevel_OnlyVBoxGuest;
            break;

        /*
         * Trusted users apps only.
         */
        case VMMDevReq_QueryCredentials:
        case VMMDevReq_ReportCredentialsJudgement:
        case VMMDevReq_RegisterSharedModule:
        case VMMDevReq_UnregisterSharedModule:
        case VMMDevReq_WriteCoreDump:
        case VMMDevReq_GetCpuHotPlugRequest:
        case VMMDevReq_SetCpuHotPlugStatus:
        case VMMDevReq_CheckSharedModules:
        case VMMDevReq_GetPageSharingStatus:
        case VMMDevReq_DebugIsPageShared:
        case VMMDevReq_ReportGuestStats:
        case VMMDevReq_ReportGuestUserState:
        case VMMDevReq_GetStatisticsChangeRequest:
        case VMMDevReq_ChangeMemBalloon:
            enmRequired = kLevel_TrustedUsers;
            break;

        /*
         * Anyone.
         */
        case VMMDevReq_GetMouseStatus:
        case VMMDevReq_SetMouseStatus:
        case VMMDevReq_SetPointerShape:
        case VMMDevReq_GetHostVersion:
        case VMMDevReq_Idle:
        case VMMDevReq_GetHostTime:
        case VMMDevReq_SetPowerStatus:
        case VMMDevReq_AcknowledgeEvents:
        case VMMDevReq_CtlGuestFilterMask:
        case VMMDevReq_ReportGuestStatus:
        case VMMDevReq_GetDisplayChangeRequest:
        case VMMDevReq_VideoModeSupported:
        case VMMDevReq_GetHeightReduction:
        case VMMDevReq_GetDisplayChangeRequest2:
        case VMMDevReq_VideoModeSupported2:
        case VMMDevReq_VideoAccelEnable:
        case VMMDevReq_VideoAccelFlush:
        case VMMDevReq_VideoSetVisibleRegion:
        case VMMDevReq_GetDisplayChangeRequestEx:
        case VMMDevReq_GetSeamlessChangeRequest:
        case VMMDevReq_GetVRDPChangeRequest:
        case VMMDevReq_LogString:
        case VMMDevReq_GetSessionId:
            enmRequired = kLevel_AllUsers;
            break;

        /*
         * Depends on the request parameters...
         */
        /** @todo this have to be changed into an I/O control and the facilities
         *        tracked in the session so they can automatically be failed when the
         *        session terminates without reporting the new status.
         *
         *  The information presented by IGuest is not reliable without this! */
        case VMMDevReq_ReportGuestCapabilities:
            switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
            {
                case VBoxGuestFacilityType_All:
                case VBoxGuestFacilityType_VBoxGuestDriver:
                    enmRequired = kLevel_OnlyVBoxGuest;
                    break;
                case VBoxGuestFacilityType_VBoxService:
                    enmRequired = kLevel_TrustedUsers;
                    break;
                case VBoxGuestFacilityType_VBoxTrayClient:
                case VBoxGuestFacilityType_Seamless:
                case VBoxGuestFacilityType_Graphics:
                default:
                    enmRequired = kLevel_AllUsers;
                    break;
            }
            break;
    }

    /*
     * Check against the session.
     */
    switch (enmRequired)
    {
        default:
        case kLevel_NoOne:
            break;
        case kLevel_OnlyVBoxGuest:
        case kLevel_OnlyKernel:
            /* NIL R0 process means a ring-0 (in-kernel) session. */
            if (pSession->R0Process == NIL_RTR0PROCESS)
                return VINF_SUCCESS;
            break;
        case kLevel_TrustedUsers:
            if (pSession->fUserSession)
                break;
            /* Not a regular user session: fall through and grant access. */
        case kLevel_AllUsers:
            return VINF_SUCCESS;
    }

    return VERR_PERMISSION_DENIED;
}
2068
6d209b23
SF
/**
 * Handles the VMM request I/O control (VBOXGUEST_IOCTL_VMMREQUEST).
 *
 * Validates the request header and permissions, copies the request into a
 * physically contiguous heap buffer, performs it, and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReqHdr     The request buffer (in/out).
 * @param   cbData      Size of the request buffer.
 */
static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    VMMDevRequestHeader *pReqHdr, size_t cbData)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        /* NOTE(review): cbData is size_t but logged with %#x — confirm IPRT format handling on 64-bit. */
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGR0Verify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce the per-request-type trust level for this session. */
    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglR0GRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);
    Assert(pReqCopy->reserved1 == cbReq);
    pReqCopy->reserved1 = 0; /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglR0GRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly updated) request back to the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        pReqHdr->reserved1 = cbReq; /* preserve cbOut */
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglR0GRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* The transport succeeded but the VMMDev rejected the request. */
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglR0GRFree(pReqCopy);
    return rc;
}
2153
2154
2155#ifdef VBOX_WITH_HGCM
2156
2157AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2158
/** Worker for vgdrvHgcmAsyncWaitCallback and
 *  vgdrvHgcmAsyncWaitCallbackInterruptible.
 *
 * Blocks until the host marks the HGCM request as done (VBOX_HGCM_REQ_DONE),
 * or until the wait times out / is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait can be interrupted.
 * @param   cMillies        The wait timeout in milliseconds.
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* No wait entry available; back off briefly and retry. */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* Semaphore destroyed (e.g. driver unload) - get out ASAP. */

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2227
2228
2229/**
2230 * This is a callback for dealing with async waits.
2231 *
2232 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2233 */
2234static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2235{
2236 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2237 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2238 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2239 false /* fInterruptible */, u32User /* cMillies */);
2240}
2241
2242
2243/**
2244 * This is a callback for dealing with async waits with a timeout.
2245 *
2246 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2247 */
2248static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2249{
2250 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2251 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2252 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2253 true /* fInterruptible */, u32User /* cMillies */);
2254}
2255
2256
/**
 * Handles the HGCM connect I/O control (VBOXGUEST_IOCTL_HGCM_CONNECT).
 *
 * Connects to an HGCM service and records the resulting client id in the
 * session so it can be cleaned up / validated later.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The connect request (in: service location; out: client id).
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
{
    int rc;
    HGCMCLIENTID idClient = 0;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion.  The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, &idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
    if (RT_SUCCESS(rc))
    {
        /*
         * Append the client id to the client id table.
         * If the table has somehow become filled up, we'll disconnect the session.
         */
        unsigned i;
        RTSpinlockAcquire(pDevExt->SessionSpinlock);
        for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
            if (!pSession->aHGCMClientIds[i]) /* 0 = free slot. */
            {
                pSession->aHGCMClientIds[i] = idClient;
                break;
            }
        RTSpinlockRelease(pDevExt->SessionSpinlock);
        if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
        {
            /* No free slot: undo the connect so the client doesn't leak. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
            VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);

            pInfo->u.Out.idClient = 0;
            return VERR_TOO_MANY_OPEN_FILES;
        }
    }
    pInfo->u.Out.idClient = idClient;
    return rc;
}
2300
2301
/**
 * Handles the HGCM disconnect I/O control (VBOXGUEST_IOCTL_HGCM_DISCONNECT).
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session (must own the client id).
 * @param   pInfo       The disconnect request (in: client id).
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as "disconnect in progress".)
     */
    int rc;
    const uint32_t idClient = pInfo->u.In.idClient;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == idClient)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglR0HGCMInternalDisconnect call will invoke the callback if the
     * HGCM call is performed in an ASYNC fashion.  The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
    rc = VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));

    /* Update the client id array according to the result: free the slot on
       success, restore the id on failure so it can be retried. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2341
2342
6d209b23
SF
/**
 * Worker for the HGCM call I/O controls.
 *
 * Validates buffer sizes and the client id, then forwards the call to the
 * VBoxGuestLib with the appropriate callback and flags.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The call request.
 * @param   cMillies        Call timeout in milliseconds.
 * @param   fInterruptible  Whether the call can be interrupted.
 * @param   f32bit          Set if this is a 32-bit call from a 64-bit host arch.
 * @param   fUserData       Set if the parameter data is user-mode data.
 * @param   cbExtra         Extra bytes preceding the call structure in the buffer.
 * @param   cbData          Total size of the buffer.
 */
static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                    uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                                    size_t cbExtra, size_t cbData)
{
    const uint32_t u32ClientId = pInfo->u32ClientID;
    uint32_t fFlags;
    size_t cbActual;
    unsigned i;
    int rc;

    /*
     * Some more validations.
     */
    if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* Required size = extra bytes + call struct + one parameter struct per parm. */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (cbData < cbActual)
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }
    pInfo->Hdr.cbOut = (uint32_t)cbActual;

    /*
     * Validate the client id.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion.  This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        /* Transport succeeded; the actual call status is in the header. */
        rc = pInfo->Hdr.rc;
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
    }
    else
    {
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
    }
    return rc;
}
2431
6d209b23
SF
2432
2433static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2434 bool f32bit, bool fUserData, size_t cbData)
2435{
2436 return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
2437 pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2438 f32bit, fUserData, 0 /*cbExtra*/, cbData);
2439}
2440
2441
056a1eb7
SF
2442#endif /* VBOX_WITH_HGCM */
2443
/**
 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pInfo       The output buffer.
 */
static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event.  Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
                pInfo->u.Out.fHandleInR3    = false;
                pInfo->u.Out.afPadding[0]   = false;
                pInfo->u.Out.afPadding[1]   = false;
                pInfo->u.Out.afPadding[2]   = false;

                /* Try apply the new balloon size in ring-0; fHandleInR3 is set
                   if user land has to do the memory locking instead. */
                rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;
            }
            else
                LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglR0GRPerform failed. rc=%Rrc\n", rc));
            VbglR0GRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
    return rc;
}
2516
2517
/**
 * Handle a request for changing the memory balloon.
 *
 * Only used when the kernel API for ballooning is unavailable; the balloon
 * owner session supplies/releases one chunk of user memory at a time.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extention.
 * @param   pSession    The session.
 * @param   pInfo       The change request structure (input).
 */
static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
{
    int rc;
    LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
    /* Reject requests with non-zero padding (strict input validation). */
    if (   pInfo->u.In.abPadding[0]
        || pInfo->u.In.abPadding[1]
        || pInfo->u.In.abPadding[2]
        || pInfo->u.In.abPadding[3]
        || pInfo->u.In.abPadding[4]
        || pInfo->u.In.abPadding[5]
        || pInfo->u.In.abPadding[6]
#if ARCH_BITS == 32
        || pInfo->u.In.abPadding[7]
        || pInfo->u.In.abPadding[8]
        || pInfo->u.In.abPadding[9]
#endif
       )
    {
        Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
        return VERR_INVALID_PARAMETER;
    }

    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    if (!pDevExt->MemBalloon.fUseKernelAPI)
    {
        /*
         * The first user trying to query/change the balloon becomes the
         * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
         */
        if (   pDevExt->MemBalloon.pOwner != pSession
            && pDevExt->MemBalloon.pOwner == NULL)
            pDevExt->MemBalloon.pOwner = pSession;

        if (pDevExt->MemBalloon.pOwner == pSession)
            rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
        else
            rc = VERR_PERMISSION_DENIED;
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    return rc;
}
2573
2574
/**
 * Handle a request for writing a core dump of the guest on the host.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pInfo       The output buffer.
 */
static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCWRITECOREDUMP pInfo)
{
    VMMDevReqWriteCoreDump *pReq = NULL;
    int rc;
    LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
    RT_NOREF1(pDevExt);

    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
    if (RT_SUCCESS(rc))
    {
        /* Pass the caller's flags straight through to the host. */
        pReq->fFlags = pInfo->u.In.fFlags;
        rc = VbglR0GRPerform(&pReq->header);
        if (RT_FAILURE(rc))
            Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglR0GRPerform failed, rc=%Rrc!\n", rc));

        VbglR0GRFree(&pReq->header);
    }
    else
        Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             sizeof(*pReq), sizeof(*pReq), rc));
    return rc;
}
2605
2606
2607/**
2608 * Guest backdoor logging.
2609 *
2610 * @returns VBox status code.
2611 *
2612 * @param pDevExt The device extension.
2613 * @param pch The log message (need not be NULL terminated).
2614 * @param cbData Size of the buffer.
056a1eb7
SF
2615 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
2616 * call. True normal user, false root user.
2617 */
6d209b23 2618static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
056a1eb7
SF
2619{
2620 if (pDevExt->fLoggingEnabled)
2621 RTLogBackdoorPrintf("%.*s", cbData, pch);
2622 else if (!fUserSession)
2623 LogRel(("%.*s", cbData, pch));
2624 else
2625 Log(("%.*s", cbData, pch));
056a1eb7
SF
2626 return VINF_SUCCESS;
2627}
2628
2629
2630/** @name Guest Capabilities, Mouse Status and Event Filter
2631 * @{
2632 */
2633
2634/**
2635 * Clears a bit usage tracker (init time).
2636 *
2637 * @param pTracker The tracker to clear.
2638 */
2639static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2640{
2641 uint32_t iBit;
2642 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2643
2644 for (iBit = 0; iBit < 32; iBit++)
2645 pTracker->acPerBitUsage[iBit] = 0;
2646 pTracker->fMask = 0;
2647}
2648
2649
2650#ifdef VBOX_STRICT
2651/**
2652 * Checks that pTracker->fMask is correct and that the usage values are within
2653 * the valid range.
2654 *
2655 * @param pTracker The tracker.
2656 * @param cMax Max valid usage value.
2657 * @param pszWhat Identifies the tracker in assertions.
2658 */
2659static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
2660{
2661 uint32_t fMask = 0;
2662 uint32_t iBit;
2663 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2664
2665 for (iBit = 0; iBit < 32; iBit++)
2666 if (pTracker->acPerBitUsage[iBit])
2667 {
2668 fMask |= RT_BIT_32(iBit);
2669 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2670 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2671 }
2672
2673 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
2674}
2675#endif
2676
2677
2678/**
2679 * Applies a change to the bit usage tracker.
2680 *
2681 *
2682 * @returns true if the mask changed, false if not.
2683 * @param pTracker The bit usage tracker.
2684 * @param fChanged The bits to change.
2685 * @param fPrevious The previous value of the bits.
2686 * @param cMax The max valid usage value for assertions.
2687 * @param pszWhat Identifies the tracker in assertions.
2688 */
2689static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2690 uint32_t cMax, const char *pszWhat)
2691{
2692 bool fGlobalChange = false;
2693 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2694
2695 while (fChanged)
2696 {
2697 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2698 uint32_t const fBitMask = RT_BIT_32(iBit);
2699 Assert(iBit < 32); Assert(fBitMask & fChanged);
2700
2701 if (fBitMask & fPrevious)
2702 {
2703 pTracker->acPerBitUsage[iBit] -= 1;
2704 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2705 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2706 if (pTracker->acPerBitUsage[iBit] == 0)
2707 {
2708 fGlobalChange = true;
2709 pTracker->fMask &= ~fBitMask;
2710 }
2711 }
2712 else
2713 {
2714 pTracker->acPerBitUsage[iBit] += 1;
2715 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2716 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2717 if (pTracker->acPerBitUsage[iBit] == 1)
2718 {
2719 fGlobalChange = true;
2720 pTracker->fMask |= fBitMask;
2721 }
2722 }
2723
2724 fChanged &= ~fBitMask;
2725 }
2726
2727#ifdef VBOX_STRICT
2728 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2729#endif
2730 NOREF(pszWhat); NOREF(cMax);
2731 return fGlobalChange;
2732}
2733
2734
2735/**
2736 * Init and termination worker for resetting the (host) event filter on the host
2737 *
2738 * @returns VBox status code.
2739 * @param pDevExt The device extension.
2740 * @param fFixedEvents Fixed events (init time).
2741 */
2742static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2743{
2744 VMMDevCtlGuestFilterMask *pReq;
6d209b23 2745 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
056a1eb7
SF
2746 if (RT_SUCCESS(rc))
2747 {
2748 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
2749 pReq->u32OrMask = fFixedEvents;
6d209b23 2750 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
2751 if (RT_FAILURE(rc))
2752 LogRelFunc(("failed with rc=%Rrc\n", rc));
6d209b23 2753 VbglR0GRFree(&pReq->header);
056a1eb7
SF
2754 }
2755 RT_NOREF1(pDevExt);
2756 return rc;
2757}
2758
2759
2760/**
2761 * Changes the event filter mask for the given session.
2762 *
6d209b23
SF
2763 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
2764 * session cleanup.
056a1eb7
SF
2765 *
2766 * @returns VBox status code.
2767 * @param pDevExt The device extension.
2768 * @param pSession The session.
2769 * @param fOrMask The events to add.
2770 * @param fNotMask The events to remove.
2771 * @param fSessionTermination Set if we're called by the session cleanup code.
2772 * This tweaks the error handling so we perform
2773 * proper session cleanup even if the host
2774 * misbehaves.
2775 *
2776 * @remarks Takes the session spinlock.
2777 */
2778static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2779 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2780{
2781 VMMDevCtlGuestFilterMask *pReq;
2782 uint32_t fChanged;
2783 uint32_t fPrevious;
2784 int rc;
2785
2786 /*
2787 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2788 */
6d209b23 2789 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
056a1eb7
SF
2790 if (RT_SUCCESS(rc))
2791 { /* nothing */ }
2792 else if (!fSessionTermination)
2793 {
6d209b23 2794 LogRel(("vgdrvSetSessionFilterMask: VbglR0GRAlloc failure: %Rrc\n", rc));
056a1eb7
SF
2795 return rc;
2796 }
2797 else
2798 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2799
2800
2801 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2802
2803 /*
2804 * Apply the changes to the session mask.
2805 */
2806 fPrevious = pSession->fEventFilter;
2807 pSession->fEventFilter |= fOrMask;
2808 pSession->fEventFilter &= ~fNotMask;
2809
2810 /*
2811 * If anything actually changed, update the global usage counters.
2812 */
2813 fChanged = fPrevious ^ pSession->fEventFilter;
6d209b23
SF
2814 LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
2815 fPrevious, pSession->fEventFilter, fChanged));
056a1eb7
SF
2816 if (fChanged)
2817 {
2818 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
2819 pDevExt->cSessions, "EventFilterTracker");
2820
2821 /*
2822 * If there are global changes, update the event filter on the host.
2823 */
2824 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
2825 {
2826 Assert(pReq || fSessionTermination);
2827 if (pReq)
2828 {
2829 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
2830 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
2831 rc = VINF_SUCCESS;
2832 else
2833 {
2834 pDevExt->fEventFilterHost = pReq->u32OrMask;
2835 pReq->u32NotMask = ~pReq->u32OrMask;
6d209b23 2836 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
2837 if (RT_FAILURE(rc))
2838 {
2839 /*
2840 * Failed, roll back (unless it's session termination time).
2841 */
2842 pDevExt->fEventFilterHost = UINT32_MAX;
2843 if (!fSessionTermination)
2844 {
2845 vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
2846 pDevExt->cSessions, "EventFilterTracker");
2847 pSession->fEventFilter = fPrevious;
2848 }
2849 }
2850 }
2851 }
2852 else
2853 rc = VINF_SUCCESS;
2854 }
2855 }
2856
2857 RTSpinlockRelease(pDevExt->SessionSpinlock);
2858 if (pReq)
6d209b23 2859 VbglR0GRFree(&pReq->header);
056a1eb7
SF
2860 return rc;
2861}
2862
2863
2864/**
6d209b23 2865 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
056a1eb7
SF
2866 *
2867 * @returns VBox status code.
2868 *
2869 * @param pDevExt The device extension.
2870 * @param pSession The session.
2871 * @param pInfo The request.
2872 */
6d209b23 2873static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
056a1eb7 2874{
6d209b23 2875 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
056a1eb7 2876
6d209b23 2877 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
056a1eb7 2878 {
6d209b23 2879 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
056a1eb7
SF
2880 return VERR_INVALID_PARAMETER;
2881 }
2882
6d209b23 2883 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
056a1eb7
SF
2884}
2885
2886
2887/**
2888 * Init and termination worker for set mouse feature status to zero on the host.
2889 *
2890 * @returns VBox status code.
2891 * @param pDevExt The device extension.
2892 */
2893static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
2894{
2895 VMMDevReqMouseStatus *pReq;
6d209b23 2896 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
056a1eb7
SF
2897 if (RT_SUCCESS(rc))
2898 {
2899 pReq->mouseFeatures = 0;
2900 pReq->pointerXPos = 0;
2901 pReq->pointerYPos = 0;
6d209b23 2902 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
2903 if (RT_FAILURE(rc))
2904 LogRelFunc(("failed with rc=%Rrc\n", rc));
6d209b23 2905 VbglR0GRFree(&pReq->header);
056a1eb7
SF
2906 }
2907 RT_NOREF1(pDevExt);
2908 return rc;
2909}
2910
2911
2912/**
2913 * Changes the mouse status mask for the given session.
2914 *
2915 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
2916 * do session cleanup.
2917 *
2918 * @returns VBox status code.
2919 * @param pDevExt The device extension.
2920 * @param pSession The session.
2921 * @param fOrMask The status flags to add.
2922 * @param fNotMask The status flags to remove.
2923 * @param fSessionTermination Set if we're called by the session cleanup code.
2924 * This tweaks the error handling so we perform
2925 * proper session cleanup even if the host
2926 * misbehaves.
2927 *
2928 * @remarks Takes the session spinlock.
2929 */
2930static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2931 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2932{
2933 VMMDevReqMouseStatus *pReq;
2934 uint32_t fChanged;
2935 uint32_t fPrevious;
2936 int rc;
2937
2938 /*
2939 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2940 */
6d209b23 2941 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
056a1eb7
SF
2942 if (RT_SUCCESS(rc))
2943 { /* nothing */ }
2944 else if (!fSessionTermination)
2945 {
6d209b23 2946 LogRel(("vgdrvSetSessionMouseStatus: VbglR0GRAlloc failure: %Rrc\n", rc));
056a1eb7
SF
2947 return rc;
2948 }
2949 else
2950 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2951
2952
2953 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2954
2955 /*
2956 * Apply the changes to the session mask.
2957 */
2958 fPrevious = pSession->fMouseStatus;
2959 pSession->fMouseStatus |= fOrMask;
2960 pSession->fMouseStatus &= ~fNotMask;
2961
2962 /*
2963 * If anything actually changed, update the global usage counters.
2964 */
2965 fChanged = fPrevious ^ pSession->fMouseStatus;
2966 if (fChanged)
2967 {
2968 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
2969 pDevExt->cSessions, "MouseStatusTracker");
2970
2971 /*
2972 * If there are global changes, update the event filter on the host.
2973 */
2974 if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
2975 {
2976 Assert(pReq || fSessionTermination);
2977 if (pReq)
2978 {
2979 pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
2980 if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
2981 rc = VINF_SUCCESS;
2982 else
2983 {
2984 pDevExt->fMouseStatusHost = pReq->mouseFeatures;
2985 pReq->pointerXPos = 0;
2986 pReq->pointerYPos = 0;
6d209b23 2987 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
2988 if (RT_FAILURE(rc))
2989 {
2990 /*
2991 * Failed, roll back (unless it's session termination time).
2992 */
2993 pDevExt->fMouseStatusHost = UINT32_MAX;
2994 if (!fSessionTermination)
2995 {
2996 vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
2997 pDevExt->cSessions, "MouseStatusTracker");
2998 pSession->fMouseStatus = fPrevious;
2999 }
3000 }
3001 }
3002 }
3003 else
3004 rc = VINF_SUCCESS;
3005 }
3006 }
3007
3008 RTSpinlockRelease(pDevExt->SessionSpinlock);
3009 if (pReq)
6d209b23 3010 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3011 return rc;
3012}
3013
3014
3015/**
3016 * Sets the mouse status features for this session and updates them globally.
3017 *
3018 * @returns VBox status code.
3019 *
3020 * @param pDevExt The device extention.
3021 * @param pSession The session.
3022 * @param fFeatures New bitmap of enabled features.
3023 */
3024static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3025{
6d209b23 3026 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
056a1eb7
SF
3027
3028 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3029 return VERR_INVALID_PARAMETER;
3030
3031 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3032}
3033
3034
3035/**
3036 * Return the mask of VMM device events that this session is allowed to see (wrt
3037 * to "acquire" mode guest capabilities).
3038 *
3039 * The events associated with guest capabilities in "acquire" mode will be
3040 * restricted to sessions which has acquired the respective capabilities.
3041 * If someone else tries to wait for acquired events, they won't be woken up
3042 * when the event becomes pending. Should some other thread in the session
3043 * acquire the capability while the corresponding event is pending, the waiting
3044 * thread will woken up.
3045 *
3046 * @returns Mask of events valid for the given session.
3047 * @param pDevExt The device extension.
3048 * @param pSession The session.
3049 *
3050 * @remarks Needs only be called when dispatching events in the
3051 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3052 */
3053static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3054{
3055 uint32_t fAcquireModeGuestCaps;
3056 uint32_t fAcquiredGuestCaps;
3057 uint32_t fAllowedEvents;
3058
3059 /*
3060 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3061 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3062 */
3063 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3064 if (fAcquireModeGuestCaps == 0)
3065 return VMMDEV_EVENT_VALID_EVENT_MASK;
3066 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3067
3068 /*
3069 * Calculate which events to allow according to the cap config and caps
3070 * acquired by the session.
3071 */
3072 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3073 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3074 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3075 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3076
3077 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3078 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3079 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3080
3081 return fAllowedEvents;
3082}
3083
3084
3085/**
3086 * Init and termination worker for set guest capabilities to zero on the host.
3087 *
3088 * @returns VBox status code.
3089 * @param pDevExt The device extension.
3090 */
3091static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3092{
3093 VMMDevReqGuestCapabilities2 *pReq;
6d209b23 3094 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
056a1eb7
SF
3095 if (RT_SUCCESS(rc))
3096 {
3097 pReq->u32NotMask = UINT32_MAX;
3098 pReq->u32OrMask = 0;
6d209b23 3099 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
3100
3101 if (RT_FAILURE(rc))
3102 LogRelFunc(("failed with rc=%Rrc\n", rc));
6d209b23 3103 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3104 }
3105 RT_NOREF1(pDevExt);
3106 return rc;
3107}
3108
3109
3110/**
3111 * Sets the guest capabilities to the host while holding the lock.
3112 *
3113 * This will ASSUME that we're the ones in charge of the mask, so
3114 * we'll simply clear all bits we don't set.
3115 *
3116 * @returns VBox status code.
3117 * @param pDevExt The device extension.
3118 * @param pReq The request.
3119 */
3120static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3121{
3122 int rc;
3123
3124 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3125 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3126 rc = VINF_SUCCESS;
3127 else
3128 {
3129 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3130 pReq->u32NotMask = ~pReq->u32OrMask;
6d209b23 3131 rc = VbglR0GRPerform(&pReq->header);
056a1eb7
SF
3132 if (RT_FAILURE(rc))
3133 pDevExt->fGuestCapsHost = UINT32_MAX;
3134 }
3135
3136 return rc;
3137}
3138
3139
3140/**
3141 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
3142 * the given session.
3143 *
3144 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
3145 * to do session cleanup.
3146 *
3147 * @returns VBox status code.
3148 * @param pDevExt The device extension.
3149 * @param pSession The session.
3150 * @param fOrMask The capabilities to add .
3151 * @param fNotMask The capabilities to remove. Ignored in
3152 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
6d209b23 3153 * @param fFlags Confusing operation modifier.
056a1eb7
SF
3154 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
3155 * configure and acquire/release the capabilities.
3156 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3157 * means only configure capabilities in the
3158 * @a fOrMask capabilities for "acquire" mode.
3159 * @param fSessionTermination Set if we're called by the session cleanup code.
3160 * This tweaks the error handling so we perform
3161 * proper session cleanup even if the host
3162 * misbehaves.
3163 *
3164 * @remarks Takes both the session and event spinlocks.
3165 */
3166static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
6d209b23 3167 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
056a1eb7
SF
3168 bool fSessionTermination)
3169{
3170 uint32_t fCurrentOwnedCaps;
3171 uint32_t fSessionRemovedCaps;
3172 uint32_t fSessionAddedCaps;
3173 uint32_t fOtherConflictingCaps;
3174 VMMDevReqGuestCapabilities2 *pReq = NULL;
3175 int rc;
3176
3177
3178 /*
3179 * Validate and adjust input.
3180 */
3181 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3182 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3183 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3184 {
6d209b23
SF
3185 LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
3186 fOrMask, pSession, fNotMask, fFlags));
056a1eb7
SF
3187 return VERR_INVALID_PARAMETER;
3188 }
3189
6d209b23 3190 if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
056a1eb7 3191 {
6d209b23
SF
3192 LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
3193 fFlags, pSession, fOrMask, fNotMask));
056a1eb7
SF
3194 return VERR_INVALID_PARAMETER;
3195 }
3196 Assert(!fOrMask || !fSessionTermination);
3197
3198 /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
3199 fNotMask &= ~fOrMask;
3200
3201 /*
3202 * Preallocate a update request if we're about to do more than just configure
3203 * the capability mode.
3204 */
6d209b23 3205 if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
056a1eb7 3206 {
6d209b23 3207 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
056a1eb7
SF
3208 if (RT_SUCCESS(rc))
3209 { /* do nothing */ }
3210 else if (!fSessionTermination)
3211 {
6d209b23
SF
3212 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglR0GRAlloc failure: %Rrc\n",
3213 pSession, fOrMask, fNotMask, fFlags, rc));
056a1eb7
SF
3214 return rc;
3215 }
3216 else
3217 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3218 }
3219
3220 /*
3221 * Try switch the capabilities in the OR mask into "acquire" mode.
3222 *
3223 * Note! We currently ignore anyone which may already have "set" the capabilities
3224 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3225 */
3226 RTSpinlockAcquire(pDevExt->EventSpinlock);
3227
3228 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3229 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3230 else
3231 {
3232 RTSpinlockRelease(pDevExt->EventSpinlock);
3233
3234 if (pReq)
6d209b23 3235 VbglR0GRFree(&pReq->header);
056a1eb7 3236 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
6d209b23
SF
3237 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
3238 pSession, fOrMask, fNotMask, fFlags));
056a1eb7
SF
3239 return VERR_INVALID_STATE;
3240 }
3241
3242 /*
3243 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3244 */
6d209b23 3245 if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
056a1eb7
SF
3246 {
3247 RTSpinlockRelease(pDevExt->EventSpinlock);
3248
3249 Assert(!pReq);
6d209b23
SF
3250 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
3251 pSession, fOrMask, fNotMask, fFlags));
056a1eb7
SF
3252 return VINF_SUCCESS;
3253 }
3254 Assert(pReq || fSessionTermination);
3255
3256 /*
3257 * Caller wants to acquire/release the capabilities too.
3258 *
3259 * Note! The mode change of the capabilities above won't be reverted on
3260 * failure, this is intentional.
3261 */
3262 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3263 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3264 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3265 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3266 fOtherConflictingCaps &= fSessionAddedCaps;
3267
3268 if (!fOtherConflictingCaps)
3269 {
3270 if (fSessionAddedCaps)
3271 {
3272 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3273 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3274 }
3275
3276 if (fSessionRemovedCaps)
3277 {
3278 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3279 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3280 }
3281
3282 /*
3283 * If something changes (which is very likely), tell the host.
3284 */
3285 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3286 {
3287 Assert(pReq || fSessionTermination);
3288 if (pReq)
3289 {
3290 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3291 if (RT_FAILURE(rc) && !fSessionTermination)
3292 {
3293 /* Failed, roll back. */
3294 if (fSessionAddedCaps)
3295 {
3296 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3297 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3298 }
3299 if (fSessionRemovedCaps)
3300 {
3301 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3302 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3303 }
3304
3305 RTSpinlockRelease(pDevExt->EventSpinlock);
3306 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
6d209b23 3307 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3308 return rc;
3309 }
3310 }
3311 }
3312 }
3313 else
3314 {
3315 RTSpinlockRelease(pDevExt->EventSpinlock);
3316
3317 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
6d209b23 3318 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3319 return VERR_RESOURCE_BUSY;
3320 }
3321
3322 RTSpinlockRelease(pDevExt->EventSpinlock);
3323 if (pReq)
6d209b23 3324 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3325
3326 /*
3327 * If we added a capability, check if that means some other thread in our
3328 * session should be unblocked because there are events pending.
3329 *
3330 * HACK ALERT! When the seamless support capability is added we generate a
3331 * seamless change event so that the ring-3 client can sync with
3332 * the seamless state. Although this introduces a spurious
3333 * wakeups of the ring-3 client, it solves the problem of client
3334 * state inconsistency in multiuser environment (on Windows).
3335 */
3336 if (fSessionAddedCaps)
3337 {
3338 uint32_t fGenFakeEvents = 0;
3339 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3340 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3341
3342 RTSpinlockAcquire(pDevExt->EventSpinlock);
3343 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3344 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3345 RTSpinlockRelease(pDevExt->EventSpinlock);
3346
3347#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3348 VGDrvCommonWaitDoWakeUps(pDevExt);
3349#endif
3350 }
3351
3352 return VINF_SUCCESS;
3353}
3354
3355
3356/**
6d209b23 3357 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
056a1eb7
SF
3358 *
3359 * @returns VBox status code.
3360 *
3361 * @param pDevExt The device extension.
3362 * @param pSession The session.
3363 * @param pAcquire The request.
3364 */
6d209b23 3365static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
056a1eb7
SF
3366{
3367 int rc;
6d209b23
SF
3368 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3369 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
056a1eb7 3370
6d209b23
SF
3371 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3372 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
056a1eb7 3373 if (RT_FAILURE(rc))
6d209b23
SF
3374 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3375 return rc;
056a1eb7
SF
3376}
3377
3378
3379/**
3380 * Sets the guest capabilities for a session.
3381 *
3382 * @returns VBox status code.
3383 * @param pDevExt The device extension.
3384 * @param pSession The session.
3385 * @param fOrMask The capabilities to add.
3386 * @param fNotMask The capabilities to remove.
6d209b23
SF
3387 * @param pfSessionCaps Where to return the guest capabilities reported
3388 * for this session. Optional.
3389 * @param pfGlobalCaps Where to return the guest capabilities reported
3390 * for all the sessions. Optional.
3391 *
056a1eb7
SF
3392 * @param fSessionTermination Set if we're called by the session cleanup code.
3393 * This tweaks the error handling so we perform
3394 * proper session cleanup even if the host
3395 * misbehaves.
3396 *
3397 * @remarks Takes the session spinlock.
3398 */
3399static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
6d209b23
SF
3400 uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
3401 bool fSessionTermination)
056a1eb7
SF
3402{
3403 /*
3404 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3405 */
3406 VMMDevReqGuestCapabilities2 *pReq;
6d209b23 3407 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
056a1eb7
SF
3408 if (RT_SUCCESS(rc))
3409 { /* nothing */ }
3410 else if (!fSessionTermination)
3411 {
6d209b23
SF
3412 if (pfSessionCaps)
3413 *pfSessionCaps = UINT32_MAX;
3414 if (pfGlobalCaps)
3415 *pfGlobalCaps = UINT32_MAX;
3416 LogRel(("vgdrvSetSessionCapabilities: VbglR0GRAlloc failure: %Rrc\n", rc));
056a1eb7
SF
3417 return rc;
3418 }
3419 else
3420 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3421
3422
3423 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3424
3425#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3426 /*
3427 * Capabilities in "acquire" mode cannot be set via this API.
3428 * (Acquire mode is only used on windows at the time of writing.)
3429 */
3430 if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
3431#endif
3432 {
3433 /*
3434 * Apply the changes to the session mask.
3435 */
3436 uint32_t fChanged;
3437 uint32_t fPrevious = pSession->fCapabilities;
3438 pSession->fCapabilities |= fOrMask;
3439 pSession->fCapabilities &= ~fNotMask;
3440
3441 /*
3442 * If anything actually changed, update the global usage counters.
3443 */
3444 fChanged = fPrevious ^ pSession->fCapabilities;
3445 if (fChanged)
3446 {
3447 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
3448 pDevExt->cSessions, "SetGuestCapsTracker");
3449
3450 /*
3451 * If there are global changes, update the capabilities on the host.
3452 */
3453 if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
3454 {
3455 Assert(pReq || fSessionTermination);
3456 if (pReq)
3457 {
3458 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3459
3460 /* On failure, roll back (unless it's session termination time). */
3461 if (RT_FAILURE(rc) && !fSessionTermination)
3462 {
3463 vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
3464 pDevExt->cSessions, "SetGuestCapsTracker");
3465 pSession->fCapabilities = fPrevious;
3466 }
3467 }
3468 }
3469 }
3470 }
3471#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3472 else
3473 rc = VERR_RESOURCE_BUSY;
3474#endif
3475
6d209b23
SF
3476 if (pfSessionCaps)
3477 *pfSessionCaps = pSession->fCapabilities;
3478 if (pfGlobalCaps)
3479 *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3480
056a1eb7
SF
3481 RTSpinlockRelease(pDevExt->SessionSpinlock);
3482 if (pReq)
6d209b23 3483 VbglR0GRFree(&pReq->header);
056a1eb7
SF
3484 return rc;
3485}
3486
3487
3488/**
6d209b23 3489 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
056a1eb7
SF
3490 *
3491 * @returns VBox status code.
3492 *
3493 * @param pDevExt The device extension.
3494 * @param pSession The session.
3495 * @param pInfo The request.
3496 */
6d209b23 3497static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
056a1eb7
SF
3498{
3499 int rc;
6d209b23 3500 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
056a1eb7 3501
6d209b23
SF
3502 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3503 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3504 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
056a1eb7
SF
3505 else
3506 rc = VERR_INVALID_PARAMETER;
3507
3508 return rc;
3509}
3510
3511/** @} */
3512
3513
3514/**
3515 * Common IOCtl for user to kernel and kernel to kernel communication.
3516 *
3517 * This function only does the basic validation and then invokes
3518 * worker functions that takes care of each specific function.
3519 *
3520 * @returns VBox status code.
3521 *
3522 * @param iFunction The requested function.
3523 * @param pDevExt The device extension.
3524 * @param pSession The client session.
6d209b23
SF
3525 * @param pReqHdr Pointer to the request. This always starts with
3526 * a request common header.
3527 * @param cbReq The max size of the request buffer.
056a1eb7 3528 */
6d209b23 3529int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
056a1eb7 3530{
6d209b23 3531 uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
056a1eb7 3532 int rc;
056a1eb7 3533
6d209b23
SF
3534 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
3535 iFunction, pDevExt, pSession, pReqHdr, cbReq));
056a1eb7
SF
3536
3537 /*
3538 * Define some helper macros to simplify validation.
3539 */
6d209b23 3540#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
056a1eb7 3541 do { \
6d209b23
SF
3542 if (RT_LIKELY( pReqHdr->cbIn == (cbInExpect) \
3543 && ( pReqHdr->cbOut == (cbOutExpect) \
3544 || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
3545 { /* likely */ } \
3546 else \
056a1eb7 3547 { \
6d209b23
SF
3548 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
3549 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
3550 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
056a1eb7
SF
3551 } \
3552 } while (0)
6d209b23
SF
3553
3554#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
3555
3556#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
056a1eb7 3557 do { \
6d209b23
SF
3558 if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
3559 { /* likely */ } \
3560 else \
056a1eb7 3561 { \
6d209b23
SF
3562 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
3563 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
3564 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
056a1eb7 3565 } \
6d209b23
SF
3566 } while (0)
3567
3568#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
3569 do { \
3570 if (RT_LIKELY( pReqHdr->cbOut == (cbOutExpect) \
3571 || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
3572 { /* likely */ } \
3573 else \
3574 { \
3575 Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
3576 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
3577 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3578 } \
3579 } while (0)
3580
3581#define REQ_CHECK_EXPR(Name, expr) \
3582 do { \
3583 if (RT_LIKELY(!!(expr))) \
3584 { /* likely */ } \
3585 else \
056a1eb7 3586 { \
6d209b23
SF
3587 Log(( #Name ": %s\n", #expr)); \
3588 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
056a1eb7
SF
3589 } \
3590 } while (0)
6d209b23
SF
3591
3592#define REQ_CHECK_EXPR_FMT(expr, fmt) \
056a1eb7 3593 do { \
6d209b23
SF
3594 if (RT_LIKELY(!!(expr))) \
3595 { /* likely */ } \
3596 else \
056a1eb7 3597 { \
6d209b23
SF
3598 Log( fmt ); \
3599 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
056a1eb7 3600 } \
6d209b23
SF
3601 } while (0)
3602
3603#define REQ_CHECK_RING0(mnemonic) \
3604 do { \
3605 if (pSession->R0Process != NIL_RTR0PROCESS) \
056a1eb7 3606 { \
6d209b23
SF
3607 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3608 pSession->Process, (uintptr_t)pSession->R0Process)); \
3609 return pReqHdr->rc = VERR_PERMISSION_DENIED; \
056a1eb7
SF
3610 } \
3611 } while (0)
3612
3613
3614 /*
6d209b23 3615 * Validate the request.
056a1eb7 3616 */
6d209b23
SF
3617 if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
3618 { /* likely */ }
3619 else
056a1eb7 3620 {
6d209b23
SF
3621 Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
3622 return VERR_INVALID_PARAMETER;
056a1eb7 3623 }
6d209b23
SF
3624
3625 if (pReqHdr->cbOut == 0)
3626 pReqHdr->cbOut = pReqHdr->cbIn;
3627
3628 if (RT_LIKELY( pReqHdr->uVersion == VBGLREQHDR_VERSION
3629 && pReqHdr->cbIn >= sizeof(*pReqHdr)
3630 && pReqHdr->cbIn <= cbReq
3631 && pReqHdr->cbOut >= sizeof(*pReqHdr)
3632 && pReqHdr->cbOut <= cbReq))
3633 { /* likely */ }
3634 else
056a1eb7 3635 {
6d209b23
SF
3636 Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
3637 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
3638 return VERR_INVALID_PARAMETER;
056a1eb7 3639 }
6d209b23
SF
3640
3641 if (RT_LIKELY(RT_VALID_PTR(pSession)))
3642 { /* likely */ }
3643 else
056a1eb7 3644 {
6d209b23
SF
3645 Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
3646 return VERR_INVALID_PARAMETER;
056a1eb7 3647 }
6d209b23
SF
3648
3649
3650 /*
3651 * Deal with variably sized requests first.
3652 */
3653 rc = VINF_SUCCESS;
3654 if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
3655 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
056a1eb7 3656 {
6d209b23
SF
3657 REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
3658 REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
3659 ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
3660 pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
056a1eb7 3661 }
6d209b23 3662 else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
056a1eb7 3663 {
6d209b23
SF
3664 if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
3665 {
3666 REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
3667 pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
3668 pSession->fUserSession);
3669 }
3670#ifdef VBOX_WITH_HGCM
3671 else if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0))
3672# if ARCH_BITS == 64
3673 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0))
056a1eb7 3674# endif
6d209b23
SF
3675 )
3676 {
3677 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
3678 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
3679 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
3680 iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0)),
3681 false /*fUserData*/, cbReq);
3682 }
3683 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
3684 {
3685 REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
3686 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
3687 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
3688 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
3689 ARCH_BITS == 32, true /*fUserData*/, cbReq);
3690 }
056a1eb7 3691#endif /* VBOX_WITH_HGCM */
6d209b23 3692 else
056a1eb7 3693 {
6d209b23
SF
3694 switch (iFunction)
3695 {
3696 /*
3697 * Ring-0 only:
3698 */
3699 case VBGL_IOCTL_IDC_CONNECT:
3700 REQ_CHECK_RING0("VBGL_IOCL_IDC_CONNECT");
3701 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
3702 pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
3703 break;
056a1eb7 3704
6d209b23
SF
3705 case VBGL_IOCTL_IDC_DISCONNECT:
3706 REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
3707 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
3708 pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
3709 break;
056a1eb7 3710
6d209b23
SF
3711 case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
3712 REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
3713 REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
3714 pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
3715 break;
056a1eb7 3716
6d209b23
SF
3717 case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
3718 REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
3719 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
3720 pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
3721 break;
056a1eb7 3722
6d209b23
SF
3723 /*
3724 * Ring-3 only:
3725 */
3726 case VBGL_IOCTL_DRIVER_VERSION_INFO:
3727 REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
3728 pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
3729 break;
3730
3731 /*
3732 * Both ring-3 and ring-0:
3733 */
3734 case VBGL_IOCTL_WAIT_FOR_EVENTS:
3735 REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
3736 pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
3737 pSession->R0Process != NIL_RTR0PROCESS);
3738 break;
3739
3740 case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
3741 REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
3742 pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
3743 break;
3744
3745 case VBGL_IOCTL_CHANGE_FILTER_MASK:
3746 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
3747 pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
3748 break;
056a1eb7
SF
3749
3750#ifdef VBOX_WITH_HGCM
6d209b23
SF
3751 case VBGL_IOCTL_HGCM_CONNECT:
3752 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
3753 pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
3754 break;
056a1eb7 3755
6d209b23
SF
3756 case VBGL_IOCTL_HGCM_DISCONNECT:
3757 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
3758 pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
3759 break;
3760#endif
056a1eb7 3761
6d209b23
SF
3762 case VBGL_IOCTL_CHECK_BALLOON:
3763 REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
3764 pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
3765 break;
056a1eb7 3766
6d209b23
SF
3767 case VBGL_IOCTL_CHANGE_BALLOON:
3768 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
3769 pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
3770 break;
056a1eb7 3771
6d209b23
SF
3772 case VBGL_IOCTL_WRITE_CORE_DUMP:
3773 REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
3774 pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, (PVBGLIOCWRITECOREDUMP)pReqHdr);
3775 break;
056a1eb7 3776
6d209b23
SF
3777 case VBGL_IOCTL_SET_MOUSE_STATUS:
3778 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
3779 pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
3780 break;
056a1eb7 3781
6d209b23
SF
3782 case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
3783 REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
3784 pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
3785 break;
056a1eb7 3786
6d209b23
SF
3787 case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
3788 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
3789 pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
3790 break;
056a1eb7 3791
6d209b23
SF
3792#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
3793 case VBGL_IOCTL_DPC_LATENCY_CHECKER:
3794 REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
3795 pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
3796 break;
3797#endif
056a1eb7 3798
6d209b23
SF
3799 default:
3800 {
3801 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
3802 iFunction, iFunctionStripped, cbReq));
3803 pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
3804 break;
3805 }
056a1eb7
SF
3806 }
3807 }
3808 }
6d209b23
SF
3809 else
3810 {
3811 Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
3812 return VERR_INVALID_PARAMETER;
3813 }
056a1eb7 3814
6d209b23 3815 LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
056a1eb7
SF
3816 return rc;
3817}
3818
3819
3820/**
3821 * Used by VGDrvCommonISR as well as the acquire guest capability code.
3822 *
3823 * @returns VINF_SUCCESS on success. On failure, ORed together
3824 * RTSemEventMultiSignal errors (completes processing despite errors).
3825 * @param pDevExt The VBoxGuest device extension.
3826 * @param fEvents The events to dispatch.
3827 */
3828static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
3829{
3830 PVBOXGUESTWAIT pWait;
3831 PVBOXGUESTWAIT pSafe;
3832 int rc = VINF_SUCCESS;
3833
3834 fEvents |= pDevExt->f32PendingEvents;
3835
3836 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
3837 {
3838 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
3839 if ( fHandledEvents != 0
3840 && !pWait->fResEvents)
3841 {
3842 /* Does this one wait on any of the events we're dispatching? We do a quick
3843 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
3844 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
3845 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
3846 if (fHandledEvents)
3847 {
3848 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
3849 fEvents &= ~pWait->fResEvents;
3850 RTListNodeRemove(&pWait->ListNode);
3851#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3852 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
3853#else
3854 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
3855 rc |= RTSemEventMultiSignal(pWait->Event);
3856#endif
3857 if (!fEvents)
3858 break;
3859 }
3860 }
3861 }
3862
3863 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
3864 return rc;
3865}
3866
3867
3868/**
3869 * Simply checks whether the IRQ is ours or not, does not do any interrupt
3870 * procesing.
3871 *
3872 * @returns true if it was our interrupt, false if it wasn't.
3873 * @param pDevExt The VBoxGuest device extension.
3874 */
3875bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
3876{
3877 RTSpinlockAcquire(pDevExt->EventSpinlock);
3878 bool const fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
3879 RTSpinlockRelease(pDevExt->EventSpinlock);
3880
3881 return fOurIrq;
3882}
3883
3884
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;                     /* accumulates ORed RTSemEventMultiSignal failures */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglR0GRPerform here as it may take other spinlocks.
         * The sentinel rc and the barriers ensure we observe what the host
         * wrote into the ack request after the port write below.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        /* Kick VMMDev: hand it the physical address of the ack request. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;

            Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
                /* Callback is safe to invoke while holding the spinlock here. */
                if (pDevExt->pfnMouseNotifyCallback)
                    pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                PVBOXGUESTWAIT pWait;
                PVBOXGUESTWAIT pSafe;
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    /* Only wake waiters whose HGCM request the host marked done. */
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VGDrvCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Execute the mouse notification callback here if it cannot be executed while
     * holding the interrupt safe spinlock, see @bugref{8639}.
     */
#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
    if (   fMousePositionChanged
        && pDevExt->pfnMouseNotifyCallback)
        pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     *       care of it.  Same on darwin, doing it in the work loop callback.
     */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VGDrvNativeISRMousePollEvent(pDevExt);
    }

    /* Semaphore signalling failures are not expected; only assert in debug builds. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
4014