1 /* $Id: HGCMInternal.cpp $ */
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
7 * Copyright (C) 2006-2016 Oracle Corporation
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
27 /* Entire file is ifdef'ed with VBGL_VBOXGUEST */
31 /*********************************************************************************************************************************
33 *********************************************************************************************************************************/
34 #define LOG_GROUP LOG_GROUP_HGCM
36 #include "VBGLInternal.h"
37 #include <iprt/alloca.h>
39 #include <iprt/assert.h>
41 #include <iprt/memobj.h>
42 #include <iprt/string.h>
43 #include <iprt/thread.h>
44 #include <iprt/time.h>
47 /*********************************************************************************************************************************
48 * Defined Constants And Macros *
49 *********************************************************************************************************************************/
50 /** The max parameter buffer size for a user request. */
51 #define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
52 /** The max parameter buffer size for a kernel request. */
53 #define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 * side effects.
 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
# define USE_BOUNCE_BUFFERS
#endif
62 /*********************************************************************************************************************************
63 * Structures and Typedefs *
64 *********************************************************************************************************************************/
66 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
75 #ifdef USE_BOUNCE_BUFFERS
83 /* These functions can be only used by VBoxGuest. */
85 DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo
*pConnectInfo
,
86 PFNVBGLHGCMCALLBACK pfnAsyncCallback
, void *pvAsyncData
, uint32_t u32AsyncData
)
88 VMMDevHGCMConnect
*pHGCMConnect
;
91 if (!pConnectInfo
|| !pfnAsyncCallback
)
92 return VERR_INVALID_PARAMETER
;
96 /* Allocate request */
97 rc
= VbglGRAlloc ((VMMDevRequestHeader
**)&pHGCMConnect
, sizeof (VMMDevHGCMConnect
), VMMDevReq_HGCMConnect
);
101 /* Initialize request memory */
102 pHGCMConnect
->header
.fu32Flags
= 0;
104 memcpy (&pHGCMConnect
->loc
, &pConnectInfo
->Loc
, sizeof (HGCMServiceLocation
));
105 pHGCMConnect
->u32ClientID
= 0;
108 rc
= VbglGRPerform (&pHGCMConnect
->header
.header
);
112 /* Check if host decides to process the request asynchronously. */
113 if (rc
== VINF_HGCM_ASYNC_EXECUTE
)
115 /* Wait for request completion interrupt notification from host */
116 pfnAsyncCallback (&pHGCMConnect
->header
, pvAsyncData
, u32AsyncData
);
119 pConnectInfo
->result
= pHGCMConnect
->header
.result
;
121 if (RT_SUCCESS (pConnectInfo
->result
))
122 pConnectInfo
->u32ClientID
= pHGCMConnect
->u32ClientID
;
125 VbglGRFree (&pHGCMConnect
->header
.header
);
132 DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo
*pDisconnectInfo
,
133 PFNVBGLHGCMCALLBACK pfnAsyncCallback
, void *pvAsyncData
, uint32_t u32AsyncData
)
135 VMMDevHGCMDisconnect
*pHGCMDisconnect
;
138 if (!pDisconnectInfo
|| !pfnAsyncCallback
)
139 return VERR_INVALID_PARAMETER
;
141 pHGCMDisconnect
= NULL
;
143 /* Allocate request */
144 rc
= VbglGRAlloc ((VMMDevRequestHeader
**)&pHGCMDisconnect
, sizeof (VMMDevHGCMDisconnect
), VMMDevReq_HGCMDisconnect
);
148 /* Initialize request memory */
149 pHGCMDisconnect
->header
.fu32Flags
= 0;
151 pHGCMDisconnect
->u32ClientID
= pDisconnectInfo
->u32ClientID
;
154 rc
= VbglGRPerform (&pHGCMDisconnect
->header
.header
);
158 /* Check if host decides to process the request asynchronously. */
159 if (rc
== VINF_HGCM_ASYNC_EXECUTE
)
161 /* Wait for request completion interrupt notification from host */
162 pfnAsyncCallback (&pHGCMDisconnect
->header
, pvAsyncData
, u32AsyncData
);
165 pDisconnectInfo
->result
= pHGCMDisconnect
->header
.result
;
168 VbglGRFree (&pHGCMDisconnect
->header
.header
);
176 * Preprocesses the HGCM call, validating and locking/buffering parameters.
178 * @returns VBox status code.
180 * @param pCallInfo The call info.
181 * @param cbCallInfo The size of the call info structure.
182 * @param fIsUser Is it a user request or kernel request.
183 * @param pcbExtra Where to return the extra request space needed for
184 * physical page lists.
186 static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo
const *pCallInfo
, uint32_t cbCallInfo
,
187 bool fIsUser
, struct VbglR0ParmInfo
*pParmInfo
, size_t *pcbExtra
)
189 HGCMFunctionParameter
const *pSrcParm
= VBOXGUEST_HGCM_CALL_PARMS(pCallInfo
);
190 uint32_t cParms
= pCallInfo
->cParms
;
195 * Lock down the any linear buffers so we can get their addresses
196 * and figure out how much extra storage we need for page lists.
198 * Note! With kernel mode users we can be assertive. For user mode users
199 * we should just (debug) log it and fail without any fanfare.
202 pParmInfo
->cLockBufs
= 0;
203 for (iParm
= 0; iParm
< cParms
; iParm
++, pSrcParm
++)
205 switch (pSrcParm
->type
)
207 case VMMDevHGCMParmType_32bit
:
208 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm
, pSrcParm
->u
.value32
));
211 case VMMDevHGCMParmType_64bit
:
212 Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm
, pSrcParm
->u
.value64
));
215 case VMMDevHGCMParmType_PageList
:
217 return VERR_INVALID_PARAMETER
;
218 cb
= pSrcParm
->u
.PageList
.size
;
221 uint32_t off
= pSrcParm
->u
.PageList
.offset
;
222 HGCMPageListInfo
*pPgLst
;
226 AssertMsgReturn(cb
<= VBGLR0_MAX_HGCM_KERNEL_PARM
, ("%#x > %#x\n", cb
, VBGLR0_MAX_HGCM_KERNEL_PARM
),
228 AssertMsgReturn( off
>= pCallInfo
->cParms
* sizeof(HGCMFunctionParameter
)
229 && off
<= cbCallInfo
- sizeof(HGCMPageListInfo
),
230 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off
, pCallInfo
->cParms
, cbCallInfo
),
231 VERR_INVALID_PARAMETER
);
233 pPgLst
= (HGCMPageListInfo
*)((uint8_t *)pCallInfo
+ off
);
234 cPages
= pPgLst
->cPages
;
235 u32
= RT_OFFSETOF(HGCMPageListInfo
, aPages
[cPages
]) + off
;
236 AssertMsgReturn(u32
<= cbCallInfo
,
237 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32
, cPages
, off
, cbCallInfo
),
238 VERR_INVALID_PARAMETER
);
239 AssertMsgReturn(pPgLst
->offFirstPage
< PAGE_SIZE
, ("#x\n", pPgLst
->offFirstPage
), VERR_INVALID_PARAMETER
);
240 u32
= RT_ALIGN_32(pPgLst
->offFirstPage
+ cb
, PAGE_SIZE
) >> PAGE_SHIFT
;
241 AssertMsgReturn(cPages
== u32
, ("cPages=%#x u32=%#x\n", cPages
, u32
), VERR_INVALID_PARAMETER
);
242 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst
->flags
), ("%#x\n", pPgLst
->flags
), VERR_INVALID_PARAMETER
);
243 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
244 iParm
, cb
, cPages
, pPgLst
->offFirstPage
, pPgLst
->flags
));
248 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32
, pPgLst
->aPages
[u32
]));
249 AssertMsgReturn(!(pPgLst
->aPages
[u32
] & (PAGE_OFFSET_MASK
| UINT64_C(0xfff0000000000000))),
250 ("pg#%u=%RHp\n", u32
, pPgLst
->aPages
[u32
]),
251 VERR_INVALID_PARAMETER
);
254 *pcbExtra
+= RT_OFFSETOF(HGCMPageListInfo
, aPages
[pPgLst
->cPages
]);
257 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm
));
260 case VMMDevHGCMParmType_LinAddr_Locked_In
:
261 case VMMDevHGCMParmType_LinAddr_Locked_Out
:
262 case VMMDevHGCMParmType_LinAddr_Locked
:
264 return VERR_INVALID_PARAMETER
;
265 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
267 cb
= pSrcParm
->u
.Pointer
.size
;
268 AssertMsgReturn(cb
<= VBGLR0_MAX_HGCM_KERNEL_PARM
, ("%#x > %#x\n", cb
, VBGLR0_MAX_HGCM_KERNEL_PARM
),
271 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
272 iParm
, pSrcParm
->type
, cb
, pSrcParm
->u
.Pointer
.u
.linearAddr
));
274 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm
, pSrcParm
->type
));
279 case VMMDevHGCMParmType_LinAddr_In
:
280 case VMMDevHGCMParmType_LinAddr_Out
:
281 case VMMDevHGCMParmType_LinAddr
:
282 cb
= pSrcParm
->u
.Pointer
.size
;
285 #ifdef USE_BOUNCE_BUFFERS
286 void *pvSmallBuf
= NULL
;
288 uint32_t iLockBuf
= pParmInfo
->cLockBufs
;
291 uint32_t fAccess
= pSrcParm
->type
== VMMDevHGCMParmType_LinAddr_In
292 || pSrcParm
->type
== VMMDevHGCMParmType_LinAddr_Locked_In
294 : RTMEM_PROT_READ
| RTMEM_PROT_WRITE
;
296 AssertReturn(iLockBuf
< RT_ELEMENTS(pParmInfo
->aLockBufs
), VERR_INVALID_PARAMETER
);
299 AssertMsgReturn(cb
<= VBGLR0_MAX_HGCM_KERNEL_PARM
, ("%#x > %#x\n", cb
, VBGLR0_MAX_HGCM_KERNEL_PARM
),
301 rc
= RTR0MemObjLockKernel(&hObj
, (void *)pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, fAccess
);
304 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
305 pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, iParm
, pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, rc
));
308 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
309 iParm
, pSrcParm
->type
, cb
, pSrcParm
->u
.Pointer
.u
.linearAddr
, hObj
));
311 else if (cb
> VBGLR0_MAX_HGCM_USER_PARM
)
313 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
314 pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, iParm
, pSrcParm
->u
.Pointer
.u
.linearAddr
,
315 cb
, VBGLR0_MAX_HGCM_USER_PARM
));
316 return VERR_OUT_OF_RANGE
;
320 #ifndef USE_BOUNCE_BUFFERS
321 rc
= RTR0MemObjLockUser(&hObj
, (RTR3PTR
)pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, fAccess
, NIL_RTR0PROCESS
);
324 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
325 pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, iParm
, pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, rc
));
328 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
329 iParm
, pSrcParm
->type
, cb
, pSrcParm
->u
.Pointer
.u
.linearAddr
, hObj
));
331 #else /* USE_BOUNCE_BUFFERS */
333 * This is a bit massive, but we don't want to waste a
334 * whole page for a 3 byte string buffer (guest props).
336 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
337 * the system is using some power of two allocator.
339 /** @todo A more efficient strategy would be to combine buffers. However it
340 * is probably going to be more massive than the current code, so
341 * it can wait till later. */
342 bool fCopyIn
= pSrcParm
->type
!= VMMDevHGCMParmType_LinAddr_Out
343 && pSrcParm
->type
!= VMMDevHGCMParmType_LinAddr_Locked_Out
;
344 if (cb
<= PAGE_SIZE
/ 2 - 16)
346 pvSmallBuf
= fCopyIn
? RTMemTmpAlloc(cb
) : RTMemTmpAllocZ(cb
);
347 if (RT_UNLIKELY(!pvSmallBuf
))
348 return VERR_NO_MEMORY
;
351 rc
= RTR0MemUserCopyFrom(pvSmallBuf
, pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
);
354 RTMemTmpFree(pvSmallBuf
);
355 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
356 pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, iParm
,
357 pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, rc
));
361 rc
= RTR0MemObjLockKernel(&hObj
, pvSmallBuf
, cb
, fAccess
);
364 RTMemTmpFree(pvSmallBuf
);
365 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
366 rc
, pvSmallBuf
, cb
));
369 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
370 iParm
, pSrcParm
->type
, cb
, pSrcParm
->u
.Pointer
.u
.linearAddr
, pvSmallBuf
, hObj
));
374 rc
= RTR0MemObjAllocPage(&hObj
, cb
, false /*fExecutable*/);
378 memset(RTR0MemObjAddress(hObj
), '\0', cb
);
381 rc
= RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj
), pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
);
384 RTR0MemObjFree(hObj
, false /*fFreeMappings*/);
385 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
386 pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, iParm
,
387 pSrcParm
->u
.Pointer
.u
.linearAddr
, cb
, rc
));
391 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
392 iParm
, pSrcParm
->type
, cb
, pSrcParm
->u
.Pointer
.u
.linearAddr
, hObj
));
394 #endif /* USE_BOUNCE_BUFFERS */
397 pParmInfo
->aLockBufs
[iLockBuf
].iParm
= iParm
;
398 pParmInfo
->aLockBufs
[iLockBuf
].hObj
= hObj
;
399 #ifdef USE_BOUNCE_BUFFERS
400 pParmInfo
->aLockBufs
[iLockBuf
].pvSmallBuf
= pvSmallBuf
;
402 pParmInfo
->cLockBufs
= iLockBuf
+ 1;
404 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
406 size_t const cPages
= RTR0MemObjSize(hObj
) >> PAGE_SHIFT
;
407 *pcbExtra
+= RT_OFFSETOF(HGCMPageListInfo
, aPages
[cPages
]);
411 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm
, pSrcParm
->type
));
415 return VERR_INVALID_PARAMETER
;
424 * Translates locked linear address to the normal type.
425 * The locked types are only for the guest side and not handled by the host.
427 * @returns normal linear address type.
428 * @param enmType The type.
430 static HGCMFunctionParameterType
vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType
)
434 case VMMDevHGCMParmType_LinAddr_Locked_In
:
435 return VMMDevHGCMParmType_LinAddr_In
;
436 case VMMDevHGCMParmType_LinAddr_Locked_Out
:
437 return VMMDevHGCMParmType_LinAddr_Out
;
438 case VMMDevHGCMParmType_LinAddr_Locked
:
439 return VMMDevHGCMParmType_LinAddr
;
447 * Translates linear address types to page list direction flags.
449 * @returns page list flags.
450 * @param enmType The type.
452 static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType
)
456 case VMMDevHGCMParmType_LinAddr_In
:
457 case VMMDevHGCMParmType_LinAddr_Locked_In
:
458 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST
;
460 case VMMDevHGCMParmType_LinAddr_Out
:
461 case VMMDevHGCMParmType_LinAddr_Locked_Out
:
462 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST
;
464 default: AssertFailed();
465 case VMMDevHGCMParmType_LinAddr
:
466 case VMMDevHGCMParmType_LinAddr_Locked
:
467 return VBOX_HGCM_F_PARM_DIRECTION_BOTH
;
473 * Initializes the call request that we're sending to the host.
475 * @returns VBox status code.
477 * @param pCallInfo The call info.
478 * @param cbCallInfo The size of the call info structure.
479 * @param fIsUser Is it a user request or kernel request.
480 * @param pcbExtra Where to return the extra request space needed for
481 * physical page lists.
483 static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall
*pHGCMCall
, VBoxGuestHGCMCallInfo
const *pCallInfo
,
484 uint32_t cbCallInfo
, bool fIsUser
, struct VbglR0ParmInfo
*pParmInfo
)
486 HGCMFunctionParameter
const *pSrcParm
= VBOXGUEST_HGCM_CALL_PARMS(pCallInfo
);
487 HGCMFunctionParameter
*pDstParm
= VMMDEV_HGCM_CALL_PARMS(pHGCMCall
);
488 uint32_t cParms
= pCallInfo
->cParms
;
489 uint32_t offExtra
= (uint32_t)((uintptr_t)(pDstParm
+ cParms
) - (uintptr_t)pHGCMCall
);
490 uint32_t iLockBuf
= 0;
492 RT_NOREF1(cbCallInfo
);
493 #ifndef USE_BOUNCE_BUFFERS
498 * The call request headers.
500 pHGCMCall
->header
.fu32Flags
= 0;
501 pHGCMCall
->header
.result
= VINF_SUCCESS
;
503 pHGCMCall
->u32ClientID
= pCallInfo
->u32ClientID
;
504 pHGCMCall
->u32Function
= pCallInfo
->u32Function
;
505 pHGCMCall
->cParms
= cParms
;
510 for (iParm
= 0; iParm
< pCallInfo
->cParms
; iParm
++, pSrcParm
++, pDstParm
++)
512 switch (pSrcParm
->type
)
514 case VMMDevHGCMParmType_32bit
:
515 case VMMDevHGCMParmType_64bit
:
516 *pDstParm
= *pSrcParm
;
519 case VMMDevHGCMParmType_PageList
:
520 pDstParm
->type
= VMMDevHGCMParmType_PageList
;
521 pDstParm
->u
.PageList
.size
= pSrcParm
->u
.PageList
.size
;
522 if (pSrcParm
->u
.PageList
.size
)
524 HGCMPageListInfo
const *pSrcPgLst
= (HGCMPageListInfo
*)((uint8_t *)pCallInfo
+ pSrcParm
->u
.PageList
.offset
);
525 HGCMPageListInfo
*pDstPgLst
= (HGCMPageListInfo
*)((uint8_t *)pHGCMCall
+ offExtra
);
526 uint32_t const cPages
= pSrcPgLst
->cPages
;
529 pDstParm
->u
.PageList
.offset
= offExtra
;
530 pDstPgLst
->flags
= pSrcPgLst
->flags
;
531 pDstPgLst
->offFirstPage
= pSrcPgLst
->offFirstPage
;
532 pDstPgLst
->cPages
= cPages
;
533 for (iPage
= 0; iPage
< cPages
; iPage
++)
534 pDstPgLst
->aPages
[iPage
] = pSrcPgLst
->aPages
[iPage
];
536 offExtra
+= RT_OFFSETOF(HGCMPageListInfo
, aPages
[cPages
]);
539 pDstParm
->u
.PageList
.offset
= 0;
542 case VMMDevHGCMParmType_LinAddr_Locked_In
:
543 case VMMDevHGCMParmType_LinAddr_Locked_Out
:
544 case VMMDevHGCMParmType_LinAddr_Locked
:
545 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
547 *pDstParm
= *pSrcParm
;
548 pDstParm
->type
= vbglR0HGCMInternalConvertLinAddrType(pSrcParm
->type
);
553 case VMMDevHGCMParmType_LinAddr_In
:
554 case VMMDevHGCMParmType_LinAddr_Out
:
555 case VMMDevHGCMParmType_LinAddr
:
556 if (pSrcParm
->u
.Pointer
.size
!= 0)
558 #ifdef USE_BOUNCE_BUFFERS
559 void *pvSmallBuf
= pParmInfo
->aLockBufs
[iLockBuf
].pvSmallBuf
;
561 RTR0MEMOBJ hObj
= pParmInfo
->aLockBufs
[iLockBuf
].hObj
;
562 Assert(iParm
== pParmInfo
->aLockBufs
[iLockBuf
].iParm
);
564 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
566 HGCMPageListInfo
*pDstPgLst
= (HGCMPageListInfo
*)((uint8_t *)pHGCMCall
+ offExtra
);
567 size_t const cPages
= RTR0MemObjSize(hObj
) >> PAGE_SHIFT
;
570 pDstParm
->type
= VMMDevHGCMParmType_PageList
;
571 pDstParm
->u
.PageList
.size
= pSrcParm
->u
.Pointer
.size
;
572 pDstParm
->u
.PageList
.offset
= offExtra
;
573 pDstPgLst
->flags
= vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm
->type
);
574 #ifdef USE_BOUNCE_BUFFERS
576 pDstPgLst
->offFirstPage
= (uintptr_t)pvSmallBuf
& PAGE_OFFSET_MASK
;
579 pDstPgLst
->offFirstPage
= pSrcParm
->u
.Pointer
.u
.linearAddr
& PAGE_OFFSET_MASK
;
580 pDstPgLst
->cPages
= (uint32_t)cPages
; Assert(pDstPgLst
->cPages
== cPages
);
581 for (iPage
= 0; iPage
< cPages
; iPage
++)
583 pDstPgLst
->aPages
[iPage
] = RTR0MemObjGetPagePhysAddr(hObj
, iPage
);
584 Assert(pDstPgLst
->aPages
[iPage
] != NIL_RTHCPHYS
);
587 offExtra
+= RT_OFFSETOF(HGCMPageListInfo
, aPages
[cPages
]);
591 pDstParm
->type
= vbglR0HGCMInternalConvertLinAddrType(pSrcParm
->type
);
592 pDstParm
->u
.Pointer
.size
= pSrcParm
->u
.Pointer
.size
;
593 #ifdef USE_BOUNCE_BUFFERS
595 pDstParm
->u
.Pointer
.u
.linearAddr
= pvSmallBuf
596 ? (uintptr_t)pvSmallBuf
597 : (uintptr_t)RTR0MemObjAddress(hObj
);
600 pDstParm
->u
.Pointer
.u
.linearAddr
= pSrcParm
->u
.Pointer
.u
.linearAddr
;
606 pDstParm
->type
= vbglR0HGCMInternalConvertLinAddrType(pSrcParm
->type
);
607 pDstParm
->u
.Pointer
.size
= 0;
608 pDstParm
->u
.Pointer
.u
.linearAddr
= 0;
614 pDstParm
->type
= VMMDevHGCMParmType_Invalid
;
622 * Performs the call and completion wait.
624 * @returns VBox status code of this operation, not necessarily the call.
626 * @param pHGCMCall The HGCM call info.
627 * @param pfnAsyncCallback The async callback that will wait for the call
629 * @param pvAsyncData Argument for the callback.
630 * @param u32AsyncData Argument for the callback.
631 * @param pfLeakIt Where to return the leak it / free it,
632 * indicator. Cancellation fun.
634 static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall
*pHGCMCall
, PFNVBGLHGCMCALLBACK pfnAsyncCallback
,
635 void *pvAsyncData
, uint32_t u32AsyncData
, bool *pfLeakIt
)
639 Log(("calling VbglGRPerform\n"));
640 rc
= VbglGRPerform(&pHGCMCall
->header
.header
);
641 Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc
, pHGCMCall
->header
.result
));
644 * If the call failed, but as a result of the request itself, then pretend
645 * success. Upper layers will interpret the result code in the packet.
648 && rc
== pHGCMCall
->header
.result
)
650 Assert(pHGCMCall
->header
.fu32Flags
& VBOX_HGCM_REQ_DONE
);
655 * Check if host decides to process the request asynchronously,
656 * if so, we wait for it to complete using the caller supplied callback.
659 if (rc
== VINF_HGCM_ASYNC_EXECUTE
)
661 Log(("Processing HGCM call asynchronously\n"));
662 rc
= pfnAsyncCallback(&pHGCMCall
->header
, pvAsyncData
, u32AsyncData
);
663 if (pHGCMCall
->header
.fu32Flags
& VBOX_HGCM_REQ_DONE
)
665 Assert(!(pHGCMCall
->header
.fu32Flags
& VBOX_HGCM_REQ_CANCELLED
));
671 * The request didn't complete in time or the call was interrupted,
672 * the RC from the callback indicates which. Try cancel the request.
674 * This is a bit messy because we're racing request completion. Sorry.
676 /** @todo It would be nice if we could use the waiter callback to do further
677 * waiting in case of a completion race. If it wasn't for WINNT having its own
678 * version of all that stuff, I would've done it already. */
679 VMMDevHGCMCancel2
*pCancelReq
;
680 int rc2
= VbglGRAlloc((VMMDevRequestHeader
**)&pCancelReq
, sizeof(*pCancelReq
), VMMDevReq_HGCMCancel2
);
683 pCancelReq
->physReqToCancel
= VbglPhysHeapGetPhysAddr(pHGCMCall
);
684 rc2
= VbglGRPerform(&pCancelReq
->header
);
685 VbglGRFree(&pCancelReq
->header
);
687 #if 1 /** @todo ADDVER: Remove this on next minor version change. */
688 if (rc2
== VERR_NOT_IMPLEMENTED
)
690 /* host is too old, or we're out of heap. */
691 pHGCMCall
->header
.fu32Flags
|= VBOX_HGCM_REQ_CANCELLED
;
692 pHGCMCall
->header
.header
.requestType
= VMMDevReq_HGCMCancel
;
693 rc2
= VbglGRPerform(&pHGCMCall
->header
.header
);
694 if (rc2
== VERR_INVALID_PARAMETER
)
695 rc2
= VERR_NOT_FOUND
;
696 else if (RT_SUCCESS(rc
))
700 if (RT_SUCCESS(rc
)) rc
= VERR_INTERRUPTED
; /** @todo weed this out from the WINNT VBoxGuest code. */
703 Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
704 pHGCMCall
->header
.fu32Flags
|= VBOX_HGCM_REQ_CANCELLED
;
709 * Wait for a bit while the host (hopefully) completes it.
711 uint64_t u64Start
= RTTimeSystemMilliTS();
712 uint32_t cMilliesToWait
= rc2
== VERR_NOT_FOUND
|| rc2
== VERR_SEM_DESTROYED
? 500 : 2000;
713 uint64_t cElapsed
= 0;
714 if (rc2
!= VERR_NOT_FOUND
)
716 static unsigned s_cErrors
= 0;
717 if (s_cErrors
++ < 32)
718 LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc
, rc2
));
721 Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc
, rc2
));
725 ASMCompilerBarrier(); /* paranoia */
726 if (pHGCMCall
->header
.fu32Flags
& VBOX_HGCM_REQ_DONE
)
729 cElapsed
= RTTimeSystemMilliTS() - u64Start
;
730 } while (cElapsed
< cMilliesToWait
);
732 ASMCompilerBarrier(); /* paranoia^2 */
733 if (pHGCMCall
->header
.fu32Flags
& VBOX_HGCM_REQ_DONE
)
737 LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
738 pHGCMCall
->header
.header
.size
, pHGCMCall
->u32Function
, pHGCMCall
->cParms
, rc2
));
741 Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc
, rc2
, cElapsed
));
746 Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
747 rc
, pHGCMCall
->header
.result
, pHGCMCall
->header
.fu32Flags
, *pfLeakIt
));
753 * Copies the result of the call back to the caller info structure and user
754 * buffers (if using bounce buffers).
756 * @returns rc, unless RTR0MemUserCopyTo fails.
757 * @param pCallInfo Call info structure to update.
758 * @param pHGCMCall HGCM call request.
759 * @param pParmInfo Parameter locking/buffering info.
760 * @param fIsUser Is it a user (true) or kernel request.
761 * @param rc The current result code. Passed along to
762 * preserve informational status codes.
764 static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo
*pCallInfo
, VMMDevHGCMCall
const *pHGCMCall
,
765 struct VbglR0ParmInfo
*pParmInfo
, bool fIsUser
, int rc
)
767 HGCMFunctionParameter
const *pSrcParm
= VMMDEV_HGCM_CALL_PARMS(pHGCMCall
);
768 HGCMFunctionParameter
*pDstParm
= VBOXGUEST_HGCM_CALL_PARMS(pCallInfo
);
769 uint32_t cParms
= pCallInfo
->cParms
;
770 #ifdef USE_BOUNCE_BUFFERS
771 uint32_t iLockBuf
= 0;
774 RT_NOREF1(pParmInfo
);
775 #ifndef USE_BOUNCE_BUFFERS
782 pCallInfo
->result
= pHGCMCall
->header
.result
;
785 * Copy back parameters.
787 for (iParm
= 0; iParm
< cParms
; iParm
++, pSrcParm
++, pDstParm
++)
789 switch (pDstParm
->type
)
791 case VMMDevHGCMParmType_32bit
:
792 case VMMDevHGCMParmType_64bit
:
793 *pDstParm
= *pSrcParm
;
796 case VMMDevHGCMParmType_PageList
:
797 pDstParm
->u
.PageList
.size
= pSrcParm
->u
.PageList
.size
;
800 case VMMDevHGCMParmType_LinAddr_Locked_In
:
801 case VMMDevHGCMParmType_LinAddr_In
:
802 #ifdef USE_BOUNCE_BUFFERS
804 && iLockBuf
< pParmInfo
->cLockBufs
805 && iParm
== pParmInfo
->aLockBufs
[iLockBuf
].iParm
)
808 pDstParm
->u
.Pointer
.size
= pSrcParm
->u
.Pointer
.size
;
811 case VMMDevHGCMParmType_LinAddr_Locked_Out
:
812 case VMMDevHGCMParmType_LinAddr_Locked
:
813 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
815 pDstParm
->u
.Pointer
.size
= pSrcParm
->u
.Pointer
.size
;
820 case VMMDevHGCMParmType_LinAddr_Out
:
821 case VMMDevHGCMParmType_LinAddr
:
823 #ifdef USE_BOUNCE_BUFFERS
826 size_t cbOut
= RT_MIN(pSrcParm
->u
.Pointer
.size
, pDstParm
->u
.Pointer
.size
);
830 Assert(pParmInfo
->aLockBufs
[iLockBuf
].iParm
== iParm
);
831 rc2
= RTR0MemUserCopyTo((RTR3PTR
)pDstParm
->u
.Pointer
.u
.linearAddr
,
832 pParmInfo
->aLockBufs
[iLockBuf
].pvSmallBuf
833 ? pParmInfo
->aLockBufs
[iLockBuf
].pvSmallBuf
834 : RTR0MemObjAddress(pParmInfo
->aLockBufs
[iLockBuf
].hObj
),
840 else if ( iLockBuf
< pParmInfo
->cLockBufs
841 && iParm
== pParmInfo
->aLockBufs
[iLockBuf
].iParm
)
845 pDstParm
->u
.Pointer
.size
= pSrcParm
->u
.Pointer
.size
;
851 rc
= VERR_INTERNAL_ERROR_4
;
856 #ifdef USE_BOUNCE_BUFFERS
857 Assert(!fIsUser
|| pParmInfo
->cLockBufs
== iLockBuf
);
863 DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo
*pCallInfo
, uint32_t cbCallInfo
, uint32_t fFlags
,
864 PFNVBGLHGCMCALLBACK pfnAsyncCallback
, void *pvAsyncData
, uint32_t u32AsyncData
)
866 bool fIsUser
= (fFlags
& VBGLR0_HGCMCALL_F_MODE_MASK
) == VBGLR0_HGCMCALL_F_USER
;
867 struct VbglR0ParmInfo ParmInfo
;
874 AssertMsgReturn( !pCallInfo
876 || pCallInfo
->cParms
> VBOX_HGCM_MAX_PARMS
877 || !(fFlags
& ~VBGLR0_HGCMCALL_F_MODE_MASK
),
878 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo
, pfnAsyncCallback
, fFlags
),
879 VERR_INVALID_PARAMETER
);
880 AssertReturn( cbCallInfo
>= sizeof(VBoxGuestHGCMCallInfo
)
881 || cbCallInfo
>= pCallInfo
->cParms
* sizeof(HGCMFunctionParameter
),
882 VERR_INVALID_PARAMETER
);
884 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
885 pCallInfo
->u32ClientID
, pCallInfo
->u32ClientID
, pCallInfo
->u32Function
, pCallInfo
->cParms
, cbCallInfo
, fFlags
));
888 * Validate, lock and buffer the parameters for the call.
889 * This will calculate the amount of extra space for physical page list.
891 rc
= vbglR0HGCMInternalPreprocessCall(pCallInfo
, cbCallInfo
, fIsUser
, &ParmInfo
, &cbExtra
);
895 * Allocate the request buffer and recreate the call request.
897 VMMDevHGCMCall
*pHGCMCall
;
898 rc
= VbglGRAlloc((VMMDevRequestHeader
**)&pHGCMCall
,
899 sizeof(VMMDevHGCMCall
) + pCallInfo
->cParms
* sizeof(HGCMFunctionParameter
) + cbExtra
,
904 vbglR0HGCMInternalInitCall(pHGCMCall
, pCallInfo
, cbCallInfo
, fIsUser
, &ParmInfo
);
909 rc
= vbglR0HGCMInternalDoCall(pHGCMCall
, pfnAsyncCallback
, pvAsyncData
, u32AsyncData
, &fLeakIt
);
913 * Copy back the result (parameters and buffers that changed).
915 rc
= vbglR0HGCMInternalCopyBackResult(pCallInfo
, pHGCMCall
, &ParmInfo
, fIsUser
, rc
);
919 if ( rc
!= VERR_INTERRUPTED
920 && rc
!= VERR_TIMEOUT
)
922 static unsigned s_cErrors
= 0;
923 if (s_cErrors
++ < 32)
924 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc
));
929 VbglGRFree(&pHGCMCall
->header
.header
);
933 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc
));
936 * Release locks and free bounce buffers.
938 if (ParmInfo
.cLockBufs
)
939 while (ParmInfo
.cLockBufs
-- > 0)
941 RTR0MemObjFree(ParmInfo
.aLockBufs
[ParmInfo
.cLockBufs
].hObj
, false /*fFreeMappings*/);
942 #ifdef USE_BOUNCE_BUFFERS
943 RTMemTmpFree(ParmInfo
.aLockBufs
[ParmInfo
.cLockBufs
].pvSmallBuf
);
#if ARCH_BITS == 64
/**
 * Converts a 32-bit HGCM call request (from a 32-bit client on a 64-bit host
 * kernel) into the 64-bit layout, performs the call, and converts the results
 * back.
 *
 * @returns VBox status code.
 * @param   pCallInfo           The call info with 32-bit parameter layout.
 * @param   cbCallInfo          The size of the call info structure.
 * @param   fFlags              VBGLR0_HGCMCALL_F_XXX mode flags.
 * @param   pfnAsyncCallback    Callback that waits for asynchronous request
 *                              completion.
 * @param   pvAsyncData         Opaque argument for the callback.
 * @param   u32AsyncData        Opaque argument for the callback.
 */
DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VBoxGuestHGCMCallInfo   *pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64 = NULL;
    HGCMFunctionParameter32 *pParm32 = NULL;
    uint32_t                 cParms = 0;
    uint32_t                 iParm = 0;
    int                      rc = VINF_SUCCESS;

    /*
     * Input validation.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
                 && cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo) + pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
    pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;

            /*
             * Copy back.
             */
            pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
            pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        /* Only the (possibly updated) size needs copying back. */
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */
1081 #endif /* VBGL_VBOXGUEST */