]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - ubuntu/vbox/vboxguest/HGCMInternal.c
UBUNTU: vbox-update: Fix up KERN_DIR definitions
[mirror_ubuntu-bionic-kernel.git] / ubuntu / vbox / vboxguest / HGCMInternal.c
1 /* $Id: HGCMInternal.cpp $ */
2 /** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6 /*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27 /* Entire file is ifdef'ed with VBGL_VBOXGUEST */
28 #ifdef VBGL_VBOXGUEST
29
30
31 /*********************************************************************************************************************************
32 * Header Files *
33 *********************************************************************************************************************************/
34 #define LOG_GROUP LOG_GROUP_HGCM
35
36 #include "VBGLInternal.h"
37 #include <iprt/alloca.h>
38 #include <iprt/asm.h>
39 #include <iprt/assert.h>
40 #include <iprt/mem.h>
41 #include <iprt/memobj.h>
42 #include <iprt/string.h>
43 #include <iprt/thread.h>
44 #include <iprt/time.h>
45
46
47 /*********************************************************************************************************************************
48 * Defined Constants And Macros *
49 *********************************************************************************************************************************/
50 /** The max parameter buffer size for a user request. */
51 #define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
52 /** The max parameter buffer size for a kernel request. */
53 #define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
54 #if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
55 /** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
56 * side effects.
57 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
58 # define USE_BOUNCE_BUFFERS
59 #endif
60
61
62 /*********************************************************************************************************************************
63 * Structures and Typedefs *
64 *********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Records the memory objects (and optional bounce buffers) created while
 * preprocessing linear-address parameters, so they can be released after
 * the host call completes.
 */
struct VbglR0ParmInfo
{
    /** Number of aLockBufs entries in use. */
    uint32_t cLockBufs;
    struct
    {
        /** Index of the call parameter this entry belongs to. */
        uint32_t iParm;
        /** Memory object locking the parameter buffer (kernel, user, or bounce). */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small bounce buffer (RTMemTmpAlloc*), or NULL when hObj refers to a
         *  page allocation or directly locked memory. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
80
81
82
83 /* These functions can be only used by VBoxGuest. */
84
85 DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
86 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
87 {
88 VMMDevHGCMConnect *pHGCMConnect;
89 int rc;
90
91 if (!pConnectInfo || !pfnAsyncCallback)
92 return VERR_INVALID_PARAMETER;
93
94 pHGCMConnect = NULL;
95
96 /* Allocate request */
97 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
98
99 if (RT_SUCCESS(rc))
100 {
101 /* Initialize request memory */
102 pHGCMConnect->header.fu32Flags = 0;
103
104 memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
105 pHGCMConnect->u32ClientID = 0;
106
107 /* Issue request */
108 rc = VbglGRPerform (&pHGCMConnect->header.header);
109
110 if (RT_SUCCESS(rc))
111 {
112 /* Check if host decides to process the request asynchronously. */
113 if (rc == VINF_HGCM_ASYNC_EXECUTE)
114 {
115 /* Wait for request completion interrupt notification from host */
116 pfnAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
117 }
118
119 pConnectInfo->result = pHGCMConnect->header.result;
120
121 if (RT_SUCCESS (pConnectInfo->result))
122 pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
123 }
124
125 VbglGRFree (&pHGCMConnect->header.header);
126 }
127
128 return rc;
129 }
130
131
132 DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
133 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
134 {
135 VMMDevHGCMDisconnect *pHGCMDisconnect;
136 int rc;
137
138 if (!pDisconnectInfo || !pfnAsyncCallback)
139 return VERR_INVALID_PARAMETER;
140
141 pHGCMDisconnect = NULL;
142
143 /* Allocate request */
144 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
145
146 if (RT_SUCCESS(rc))
147 {
148 /* Initialize request memory */
149 pHGCMDisconnect->header.fu32Flags = 0;
150
151 pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;
152
153 /* Issue request */
154 rc = VbglGRPerform (&pHGCMDisconnect->header.header);
155
156 if (RT_SUCCESS(rc))
157 {
158 /* Check if host decides to process the request asynchronously. */
159 if (rc == VINF_HGCM_ASYNC_EXECUTE)
160 {
161 /* Wait for request completion interrupt notification from host */
162 pfnAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
163 }
164
165 pDisconnectInfo->result = pHGCMDisconnect->header.result;
166 }
167
168 VbglGRFree (&pHGCMDisconnect->header.header);
169 }
170
171 return rc;
172 }
173
174
/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to record the memory objects and bounce
 *                          buffers created here, so the caller can release
 *                          them once the call has completed.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms = pCallInfo->cParms;
    uint32_t    iParm;
    uint32_t    cb;

    /*
     * Lock down the any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
                /* Page list parameters are only accepted from kernel callers. */
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t            off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo   *pPgLst;
                    uint32_t            cPages;
                    uint32_t            u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    /* The page list must lie within the call info buffer, after the parameter array. */
                    AssertMsgReturn(   off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    /* Whole page list (variable sized aPages array) must fit inside cbCallInfo. */
                    u32    = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    /* The page count must match cb rounded up to whole pages. */
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        /* Each entry must be page aligned and within the supported physical range. */
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                /* Pre-locked buffers are likewise a kernel-only feature. */
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* No page list needed; only range-check and log. */
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;
                    /* In-direction buffers only need to be readable. */
                    uint32_t    fAccess    = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                          || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                           ? RTMEM_PROT_READ
                                           : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        /* Kernel caller: lock the buffer in place. */
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        /* User caller exceeding the user limit: log and fail, no assertion. */
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        /* Lock the user pages directly. */
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later.   */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            /* Small buffer: heap bounce buffer, zeroed if there is no copy-in. */
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            /* Big buffer: page-based bounce allocation (pvSmallBuf stays NULL). */
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    /* Record what we created so the caller can clean it up. */
                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* Reserve extra request space for the page list built later. */
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
421
422
423 /**
424 * Translates locked linear address to the normal type.
425 * The locked types are only for the guest side and not handled by the host.
426 *
427 * @returns normal linear address type.
428 * @param enmType The type.
429 */
430 static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
431 {
432 switch (enmType)
433 {
434 case VMMDevHGCMParmType_LinAddr_Locked_In:
435 return VMMDevHGCMParmType_LinAddr_In;
436 case VMMDevHGCMParmType_LinAddr_Locked_Out:
437 return VMMDevHGCMParmType_LinAddr_Out;
438 case VMMDevHGCMParmType_LinAddr_Locked:
439 return VMMDevHGCMParmType_LinAddr;
440 default:
441 return enmType;
442 }
443 }
444
445
446 /**
447 * Translates linear address types to page list direction flags.
448 *
449 * @returns page list flags.
450 * @param enmType The type.
451 */
452 static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
453 {
454 switch (enmType)
455 {
456 case VMMDevHGCMParmType_LinAddr_In:
457 case VMMDevHGCMParmType_LinAddr_Locked_In:
458 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
459
460 case VMMDevHGCMParmType_LinAddr_Out:
461 case VMMDevHGCMParmType_LinAddr_Locked_Out:
462 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
463
464 default: AssertFailed();
465 case VMMDevHGCMParmType_LinAddr:
466 case VMMDevHGCMParmType_LinAddr_Locked:
467 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
468 }
469 }
470
471
/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The allocated request to fill in (sized to hold the
 *                          parameters plus the extra page list space computed
 *                          by the preprocessing pass).
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure (unused here).
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       The lock/bounce buffer info produced by
 *                          vbglR0HGCMInternalPreprocessCall.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t    cParms   = pCallInfo->cParms;
    /* Extra (page list) data starts right after the destination parameter array. */
    uint32_t    offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t    iLockBuf = 0;
    uint32_t    iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    /* Copy the caller's page list into the extra space of the request. */
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* The host can use the locked buffer directly; only convert the type. */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ  hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    /* Lock buffer entries were recorded in parameter order by the preprocessing pass. */
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* Convert the locked buffer into a physical page list parameter. */
                        HGCMPageListInfo   *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const        cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t              iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* User data lives in the bounce buffer, not the caller's address. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = (uint32_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* No page list support: pass a (possibly bounce-buffered) linear address. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* pvSmallBuf is NULL for big buffers; use the page allocation then. */
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Zero-sized buffers are passed as NULL pointers. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size         = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                /* Preprocessing should have rejected anything else already. */
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
619
620
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it,
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            /* Completed while (or before) we waited - all good. */
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *  waiting in case of a completion race. If it wasn't for WINNT having its own
             *  version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglGRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglPhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglGRPerform(&pCancelReq->header);
                VbglGRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                /* Fall back to the legacy in-place cancel request type. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglGRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                /* VERR_NOT_FOUND / VERR_SEM_DESTROYED suggest a completion race; wait less. */
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* Still pending: the request memory must be leaked, the host may
                       write to it whenever it finally completes. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
750
751
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   pHGCMCall           HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t    iLockBuf = 0;
#endif
    uint32_t    iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     *
     * Note: switches on the *destination* (caller) type - the request side may
     *       have been converted to page lists or unlocked types by InitCall.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Only the (possibly host-updated) size needs copying back. */
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* Input-only buffers have nothing to copy back; just keep the
                   bounce buffer cursor in step with the parameter index. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Host wrote the locked buffer in place; only the size changes. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    /* Copy the bounce buffer contents back to the user buffer. */
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All bounce buffers should have been accounted for. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
861
862
863 DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
864 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
865 {
866 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
867 struct VbglR0ParmInfo ParmInfo;
868 size_t cbExtra;
869 int rc;
870
871 /*
872 * Basic validation.
873 */
874 AssertMsgReturn( !pCallInfo
875 || !pfnAsyncCallback
876 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
877 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
878 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
879 VERR_INVALID_PARAMETER);
880 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
881 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
882 VERR_INVALID_PARAMETER);
883
884 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
885 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
886
887 /*
888 * Validate, lock and buffer the parameters for the call.
889 * This will calculate the amount of extra space for physical page list.
890 */
891 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
892 if (RT_SUCCESS(rc))
893 {
894 /*
895 * Allocate the request buffer and recreate the call request.
896 */
897 VMMDevHGCMCall *pHGCMCall;
898 rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
899 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
900 VMMDevReq_HGCMCall);
901 if (RT_SUCCESS(rc))
902 {
903 bool fLeakIt;
904 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);
905
906 /*
907 * Perform the call.
908 */
909 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
910 if (RT_SUCCESS(rc))
911 {
912 /*
913 * Copy back the result (parameters and buffers that changed).
914 */
915 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
916 }
917 else
918 {
919 if ( rc != VERR_INTERRUPTED
920 && rc != VERR_TIMEOUT)
921 {
922 static unsigned s_cErrors = 0;
923 if (s_cErrors++ < 32)
924 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
925 }
926 }
927
928 if (!fLeakIt)
929 VbglGRFree(&pHGCMCall->header.header);
930 }
931 }
932 else
933 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
934
935 /*
936 * Release locks and free bounce buffers.
937 */
938 if (ParmInfo.cLockBufs)
939 while (ParmInfo.cLockBufs-- > 0)
940 {
941 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
942 #ifdef USE_BOUNCE_BUFFERS
943 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
944 #endif
945 }
946
947 return rc;
948 }
949
950
951 #if ARCH_BITS == 64
952 DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
953 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
954 {
955 VBoxGuestHGCMCallInfo *pCallInfo64 = NULL;
956 HGCMFunctionParameter *pParm64 = NULL;
957 HGCMFunctionParameter32 *pParm32 = NULL;
958 uint32_t cParms = 0;
959 uint32_t iParm = 0;
960 int rc = VINF_SUCCESS;
961
962 /*
963 * Input validation.
964 */
965 AssertMsgReturn( !pCallInfo
966 || !pfnAsyncCallback
967 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
968 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
969 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
970 VERR_INVALID_PARAMETER);
971 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
972 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
973 VERR_INVALID_PARAMETER);
974
975 /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
976 #if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
977 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
978 #endif
979
980 cParms = pCallInfo->cParms;
981 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
982
983 /*
984 * The simple approach, allocate a temporary request and convert the parameters.
985 */
986 pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
987 if (!pCallInfo64)
988 return VERR_NO_TMP_MEMORY;
989
990 *pCallInfo64 = *pCallInfo;
991 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
992 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
993 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
994 {
995 switch (pParm32->type)
996 {
997 case VMMDevHGCMParmType_32bit:
998 pParm64->type = VMMDevHGCMParmType_32bit;
999 pParm64->u.value32 = pParm32->u.value32;
1000 break;
1001
1002 case VMMDevHGCMParmType_64bit:
1003 pParm64->type = VMMDevHGCMParmType_64bit;
1004 pParm64->u.value64 = pParm32->u.value64;
1005 break;
1006
1007 case VMMDevHGCMParmType_LinAddr_Out:
1008 case VMMDevHGCMParmType_LinAddr:
1009 case VMMDevHGCMParmType_LinAddr_In:
1010 pParm64->type = pParm32->type;
1011 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
1012 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
1013 break;
1014
1015 default:
1016 rc = VERR_INVALID_PARAMETER;
1017 LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
1018 break;
1019 }
1020 if (RT_FAILURE(rc))
1021 break;
1022 }
1023 if (RT_SUCCESS(rc))
1024 {
1025 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
1026 pfnAsyncCallback, pvAsyncData, u32AsyncData);
1027
1028 if (RT_SUCCESS(rc))
1029 {
1030 *pCallInfo = *pCallInfo64;
1031
1032 /*
1033 * Copy back.
1034 */
1035 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
1036 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
1037 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1038 {
1039 switch (pParm64->type)
1040 {
1041 case VMMDevHGCMParmType_32bit:
1042 pParm32->u.value32 = pParm64->u.value32;
1043 break;
1044
1045 case VMMDevHGCMParmType_64bit:
1046 pParm32->u.value64 = pParm64->u.value64;
1047 break;
1048
1049 case VMMDevHGCMParmType_LinAddr_Out:
1050 case VMMDevHGCMParmType_LinAddr:
1051 case VMMDevHGCMParmType_LinAddr_In:
1052 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
1053 break;
1054
1055 default:
1056 LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
1057 rc = VERR_INTERNAL_ERROR_3;
1058 break;
1059 }
1060 }
1061 }
1062 else
1063 {
1064 static unsigned s_cErrors = 0;
1065 if (s_cErrors++ < 32)
1066 LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
1067 }
1068 }
1069 else
1070 {
1071 static unsigned s_cErrors = 0;
1072 if (s_cErrors++ < 32)
1073 LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
1074 }
1075
1076 RTMemTmpFree(pCallInfo64);
1077 return rc;
1078 }
1079 #endif /* ARCH_BITS == 64 */
1080
1081 #endif /* VBGL_VBOXGUEST */
1082