]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - ubuntu/vbox/vboxguest/VBoxGuestR0LibHGCMInternal.c
UBUNTU: ubuntu: vbox -- update to 5.2.2-dfsg-2
[mirror_ubuntu-bionic-kernel.git] / ubuntu / vbox / vboxguest / VBoxGuestR0LibHGCMInternal.c
CommitLineData
6d209b23 1/* $Id: VBoxGuestR0LibHGCMInternal.cpp $ */
056a1eb7
SF
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
26894aac 7 * Copyright (C) 2006-2017 Oracle Corporation
056a1eb7
SF
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
056a1eb7
SF
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_HGCM
32
6d209b23 33#include "VBoxGuestR0LibInternal.h"
056a1eb7
SF
34#include <iprt/alloca.h>
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/mem.h>
38#include <iprt/memobj.h>
39#include <iprt/string.h>
40#include <iprt/thread.h>
41#include <iprt/time.h>
42
6d209b23
SF
43#ifndef VBGL_VBOXGUEST
44# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
45#endif
46
056a1eb7
SF
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51/** The max parameter buffer size for a user request. */
52#define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
53/** The max parameter buffer size for a kernel request. */
54#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
55#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
56/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
57 * side effects.
58 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
59# define USE_BOUNCE_BUFFERS
60#endif
61
62
63/*********************************************************************************************************************************
64* Structures and Typedefs *
65*********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Tracks the memory objects (and, with bounce buffering, the temporary
 * kernel buffers) created while preprocessing linear-address parameters,
 * so they can be released after the host call completes.
 */
struct VbglR0ParmInfo
{
    /** Number of aLockBufs entries in use. */
    uint32_t cLockBufs;
    struct
    {
        /** Index of the call parameter this entry belongs to. */
        uint32_t iParm;
        /** Memory object locking (or backing) the parameter buffer. */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small bounce buffer, NULL when a full page allocation (hObj) is used. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
81
82
83
84/* These functions can be only used by VBoxGuest. */
85
6d209b23
SF
86DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, HGCMCLIENTID *pidClient,
87 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
056a1eb7 88{
056a1eb7 89 int rc;
6d209b23
SF
90 if ( RT_VALID_PTR(pLoc)
91 && RT_VALID_PTR(pidClient)
92 && RT_VALID_PTR(pfnAsyncCallback))
056a1eb7 93 {
6d209b23
SF
94 /* Allocate request */
95 VMMDevHGCMConnect *pHGCMConnect = NULL;
96 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
056a1eb7
SF
97 if (RT_SUCCESS(rc))
98 {
6d209b23
SF
99 /* Initialize request memory */
100 pHGCMConnect->header.fu32Flags = 0;
056a1eb7 101
6d209b23
SF
102 memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
103 pHGCMConnect->u32ClientID = 0;
056a1eb7 104
6d209b23
SF
105 /* Issue request */
106 rc = VbglR0GRPerform (&pHGCMConnect->header.header);
107 if (RT_SUCCESS(rc))
108 {
109 /* Check if host decides to process the request asynchronously. */
110 if (rc == VINF_HGCM_ASYNC_EXECUTE)
111 {
112 /* Wait for request completion interrupt notification from host */
113 pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
114 }
056a1eb7 115
6d209b23
SF
116 rc = pHGCMConnect->header.result;
117 if (RT_SUCCESS(rc))
118 *pidClient = pHGCMConnect->u32ClientID;
119 }
120 VbglR0GRFree(&pHGCMConnect->header.header);
121 }
056a1eb7 122 }
6d209b23
SF
123 else
124 rc = VERR_INVALID_PARAMETER;
056a1eb7
SF
125 return rc;
126}
127
128
6d209b23
SF
129DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient,
130 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
056a1eb7 131{
056a1eb7 132 int rc;
6d209b23
SF
133 if ( idClient != 0
134 && pfnAsyncCallback)
056a1eb7 135 {
6d209b23
SF
136 /* Allocate request */
137 VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
138 rc = VbglR0GRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
056a1eb7
SF
139 if (RT_SUCCESS(rc))
140 {
6d209b23
SF
141 /* Initialize request memory */
142 pHGCMDisconnect->header.fu32Flags = 0;
143
144 pHGCMDisconnect->u32ClientID = idClient;
145
146 /* Issue request */
147 rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
148 if (RT_SUCCESS(rc))
056a1eb7 149 {
6d209b23
SF
150 /* Check if host decides to process the request asynchronously. */
151 if (rc == VINF_HGCM_ASYNC_EXECUTE)
152 {
153 /* Wait for request completion interrupt notification from host */
154 pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
155 }
156
157 rc = pHGCMDisconnect->header.result;
056a1eb7
SF
158 }
159
6d209b23 160 VbglR0GRFree(&pHGCMDisconnect->header.header);
056a1eb7 161 }
056a1eb7 162 }
6d209b23
SF
163 else
164 rc = VERR_INVALID_PARAMETER;
056a1eb7
SF
165 return rc;
166}
167
168
/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo   The call info.
 * @param   cbCallInfo  The size of the call info structure.
 * @param   fIsUser     Is it a user request or kernel request.
 * @param   pParmInfo   Where to record the locked buffers / bounce buffers
 *                      created here, so the caller can release them.
 * @param   pcbExtra    Where to return the extra request space needed for
 *                      physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const cParms = pCallInfo->cParms;
    uint32_t iParm;
    uint32_t cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
                /* Page lists are a kernel-only parameter type. */
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo *pPgLst;
                    uint32_t cPages;
                    uint32_t u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    /* The page list must live after the parameter array and fit
                       entirely inside the supplied call-info buffer. */
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32 = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    /* NOTE(review): format string below looks like it should be "%#x" — Assert-message only, harmless. */
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    /* The page count must exactly match the byte count + first-page offset. */
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        /* Each entry must be a page-aligned physical address with no
                           bits set outside the supported physical address range. */
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                /* Pre-locked addresses are a kernel-only parameter type. */
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Can't build a physical page list; just size-check and pass the
                       linear address through unchanged. */
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void *pvSmallBuf = NULL;
#endif
                    uint32_t iLockBuf = pParmInfo->cLockBufs;
                    RTR0MEMOBJ hObj;
                    int rc;
                    /* In-only parameters need read access; everything else read+write. */
                    uint32_t fAccess = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                    || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                     ? RTMEM_PROT_READ
                                     : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        /* Kernel buffer: lock it in place. */
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        /* User buffer: lock the caller's pages directly. */
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later.   */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            /* Small buffer: heap-allocate a bounce buffer and lock it. */
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            /* Big buffer: allocate whole pages (hObj is both bounce
                               buffer and lock; pvSmallBuf stays NULL). */
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    /* Record the lock/bounce buffer for the copy-back and cleanup phases. */
                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
415
416
417/**
418 * Translates locked linear address to the normal type.
419 * The locked types are only for the guest side and not handled by the host.
420 *
421 * @returns normal linear address type.
422 * @param enmType The type.
423 */
424static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
425{
426 switch (enmType)
427 {
428 case VMMDevHGCMParmType_LinAddr_Locked_In:
429 return VMMDevHGCMParmType_LinAddr_In;
430 case VMMDevHGCMParmType_LinAddr_Locked_Out:
431 return VMMDevHGCMParmType_LinAddr_Out;
432 case VMMDevHGCMParmType_LinAddr_Locked:
433 return VMMDevHGCMParmType_LinAddr;
434 default:
435 return enmType;
436 }
437}
438
439
440/**
441 * Translates linear address types to page list direction flags.
442 *
443 * @returns page list flags.
444 * @param enmType The type.
445 */
446static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
447{
448 switch (enmType)
449 {
450 case VMMDevHGCMParmType_LinAddr_In:
451 case VMMDevHGCMParmType_LinAddr_Locked_In:
452 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
453
454 case VMMDevHGCMParmType_LinAddr_Out:
455 case VMMDevHGCMParmType_LinAddr_Locked_Out:
456 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
457
458 default: AssertFailed();
459 case VMMDevHGCMParmType_LinAddr:
460 case VMMDevHGCMParmType_LinAddr_Locked:
461 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
462 }
463}
464
465
/**
 * Initializes the call request that we're sending to the host.
 *
 * Copies the parameters from the caller's structure into the VMMDev request,
 * converting linear-address parameters to page lists where possible (using the
 * buffers locked by vbglR0HGCMInternalPreprocessCall).
 *
 * @param   pHGCMCall   The allocated VMMDev HGCM call request to fill in.
 * @param   pCallInfo   The caller's call info.
 * @param   cbCallInfo  The size of the call info structure (unused here).
 * @param   fIsUser     Is it a user request or kernel request.
 * @param   pParmInfo   The lock/bounce-buffer info from preprocessing.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const cParms = pCallInfo->cParms;
    /* Extra data (page lists) is placed directly after the parameter array. */
    uint32_t offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t iLockBuf = 0;
    uint32_t iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Copy the caller-supplied page list into the extra space
                   (already validated by the preprocessing step). */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const cPages = pSrcPgLst->cPages;
                    uint32_t iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* No page-list support: pass through as a plain linear
                       address (host doesn't know the locked types). */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
                    /* Lock buffers were recorded in parameter order during preprocessing. */
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* Convert the locked buffer into a physical page list in the extra space. */
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* User data lives in the bounce buffer, not the caller's address. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages = (uint32_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* Pass a (bounce-buffered, for user calls) linear address. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Zero-sized pointer parameter. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                /* Preprocessing should have rejected anything else. */
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
613
614
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it,
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *  waiting in case of a completion race. If it wasn't for WINNT having its own
             *  version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                /* Poll the DONE flag, sleeping 1 ms between checks. */
                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* The host still owns the request buffer; the caller must
                       not free it or the host may scribble on freed memory. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
744
745
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   pHGCMCall           HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const cParms = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t iLockBuf = 0;
#endif
    uint32_t iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     * Note: switched on the *destination* type, i.e. what the caller passed
     * in, since the request's types may have been converted to page lists.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* In-only buffers need no copy-back, but still consume a lock entry. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Locked buffers were passed through in place; only the size changes. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    /* Copy the bounce buffer contents back to the user buffer. */
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All lock buffers must have been accounted for. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
855
856
6d209b23 857DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
056a1eb7
SF
858 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
859{
860 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
861 struct VbglR0ParmInfo ParmInfo;
862 size_t cbExtra;
863 int rc;
864
865 /*
866 * Basic validation.
867 */
868 AssertMsgReturn( !pCallInfo
869 || !pfnAsyncCallback
870 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
871 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
872 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
873 VERR_INVALID_PARAMETER);
6d209b23 874 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
056a1eb7
SF
875 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
876 VERR_INVALID_PARAMETER);
877
878 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
879 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
880
881 /*
882 * Validate, lock and buffer the parameters for the call.
883 * This will calculate the amount of extra space for physical page list.
884 */
885 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
886 if (RT_SUCCESS(rc))
887 {
888 /*
889 * Allocate the request buffer and recreate the call request.
890 */
891 VMMDevHGCMCall *pHGCMCall;
6d209b23 892 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall,
26894aac
SF
893 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
894 VMMDevReq_HGCMCall);
056a1eb7
SF
895 if (RT_SUCCESS(rc))
896 {
897 bool fLeakIt;
898 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);
899
900 /*
901 * Perform the call.
902 */
903 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
904 if (RT_SUCCESS(rc))
905 {
906 /*
907 * Copy back the result (parameters and buffers that changed).
908 */
909 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
910 }
911 else
912 {
913 if ( rc != VERR_INTERRUPTED
914 && rc != VERR_TIMEOUT)
915 {
916 static unsigned s_cErrors = 0;
917 if (s_cErrors++ < 32)
918 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
919 }
920 }
921
922 if (!fLeakIt)
6d209b23 923 VbglR0GRFree(&pHGCMCall->header.header);
056a1eb7
SF
924 }
925 }
926 else
927 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
928
929 /*
930 * Release locks and free bounce buffers.
931 */
932 if (ParmInfo.cLockBufs)
933 while (ParmInfo.cLockBufs-- > 0)
934 {
935 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
936#ifdef USE_BOUNCE_BUFFERS
937 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
938#endif
939 }
940
941 return rc;
942}
943
944
#if ARCH_BITS == 64
/**
 * 32-bit compatibility thunk for VbglR0HGCMInternalCall on 64-bit kernels.
 *
 * Converts the 32-bit parameter layout (HGCMFunctionParameter32) into the
 * native 64-bit layout in a temporary buffer, forwards the call to
 * VbglR0HGCMInternalCall, and converts the results back.
 *
 * @returns VBox status code.
 * @param   pCallInfo           Call header followed by cParms
 *                              HGCMFunctionParameter32 structures.
 * @param   cbCallInfo          Size of the @a pCallInfo buffer in bytes.
 * @param   fFlags              VBGLR0_HGCMCALL_F_XXX mode flags.
 * @param   pfnAsyncCallback    Callback that waits on the host to complete
 *                              the request.  Mandatory.
 * @param   pvAsyncData         Opaque argument for @a pfnAsyncCallback.
 * @param   u32AsyncData        Opaque argument for @a pfnAsyncCallback.
 */
DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    PVBGLIOCHGCMCALL         pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64 = NULL;
    HGCMFunctionParameter32 *pParm32 = NULL;
    uint32_t                 cParms = 0;
    uint32_t                 iParm = 0;
    int                      rc = VINF_SUCCESS;

    /*
     * Input validation.
     *
     * Fix(review): as in VbglR0HGCMInternalCall, the original assertion OR-ed
     * the failure cases, letting NULL pointers and oversized cParms through;
     * and the size check accepted a header-only buffer for any cParms.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= sizeof(VBGLIOCHGCMCALL) + pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;  /* Copy the fixed header; parameters are converted below. */
    pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
    pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;  /* Copy back header fields updated by the host (e.g. result). */

            /*
             * Copy back the parameter values/sizes the host may have changed.
             */
            pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
            pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        /* Fix(review): the switch is on pParm64->type, so log
                           that value rather than the stale pParm32->type. */
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm64 type %d\n", pParm64->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            /* Rate-limit the log to the first 32 occurrences. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */
1074