[mirror_edk2.git] / OvmfPkg / VirtioGpuDxe / Commands.c
index 804de950ff24dbf6f386dc8c1aa590608381ffdd..1645100a05d539e84018bb2403a1e82d3a8cb210 100644
@@ -3,18 +3,12 @@
   VirtIo GPU initialization, and commands (primitives) for the GPU device.\r
 \r
   Copyright (C) 2016, Red Hat, Inc.\r
+  Copyright (c) 2017, AMD Inc, All rights reserved.<BR>\r
 \r
-  This program and the accompanying materials are licensed and made available\r
-  under the terms and conditions of the BSD License which accompanies this\r
-  distribution. The full text of the license may be found at\r
-  http://opensource.org/licenses/bsd-license.php\r
-\r
-  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT\r
-  WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+  SPDX-License-Identifier: BSD-2-Clause-Patent\r
 \r
 **/\r
 \r
-#include <IndustryStandard/VirtioGpu.h>\r
 #include <Library/VirtioLib.h>\r
 \r
 #include "VirtioGpu.h"\r
@@ -44,6 +38,7 @@ VirtioGpuInit (
   EFI_STATUS Status;\r
   UINT64     Features;\r
   UINT16     QueueSize;\r
+  UINT64     RingBaseShift;\r
 \r
   //\r
   // Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device\r
@@ -89,7 +84,7 @@ VirtioGpuInit (
   //\r
   // We only want the most basic 2D features.\r
   //\r
-  Features &= VIRTIO_F_VERSION_1;\r
+  Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;\r
 \r
   //\r
   // ... and write the subset of feature bits understood by the [...] driver to\r
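
The comment above refers to writing the negotiated feature subset back to the device; that code is not shown in this hunk. A minimal sketch of what the step typically looks like, assuming the Virtio10WriteFeatures() helper from VirtioLib and the Status/NextDevStat/Failed names used elsewhere in VirtioGpuInit():

  //
  // Sketch: for virtio-1.0 devices, write the accepted feature subset and
  // latch the FEATURES_OK status bit; on failure, unwind through the
  // function's common Failed label.
  //
  Status = Virtio10WriteFeatures (VgpuDev->VirtIo, Features, &NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }
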
@@ -128,14 +123,33 @@ VirtioGpuInit (
   //\r
   // [...] population of virtqueues [...]\r
   //\r
-  Status = VirtioRingInit (QueueSize, &VgpuDev->Ring);\r
+  Status = VirtioRingInit (VgpuDev->VirtIo, QueueSize, &VgpuDev->Ring);\r
   if (EFI_ERROR (Status)) {\r
     goto Failed;\r
   }\r
-  Status = VgpuDev->VirtIo->SetQueueAddress (VgpuDev->VirtIo, &VgpuDev->Ring);\r
+  //\r
+  // If anything fails from here on, we have to release the ring.\r
+  //\r
+  Status = VirtioRingMap (\r
+             VgpuDev->VirtIo,\r
+             &VgpuDev->Ring,\r
+             &RingBaseShift,\r
+             &VgpuDev->RingMap\r
+             );\r
   if (EFI_ERROR (Status)) {\r
     goto ReleaseQueue;\r
   }\r
+  //\r
+  // If anything fails from here on, we have to unmap the ring.\r
+  //\r
+  Status = VgpuDev->VirtIo->SetQueueAddress (\r
+                              VgpuDev->VirtIo,\r
+                              &VgpuDev->Ring,\r
+                              RingBaseShift\r
+                              );\r
+  if (EFI_ERROR (Status)) {\r
+    goto UnmapQueue;\r
+  }\r
 \r
   //\r
   // 8. Set the DRIVER_OK status bit.\r
@@ -143,13 +157,16 @@ VirtioGpuInit (
   NextDevStat |= VSTAT_DRIVER_OK;\r
   Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
   if (EFI_ERROR (Status)) {\r
-    goto ReleaseQueue;\r
+    goto UnmapQueue;\r
   }\r
 \r
   return EFI_SUCCESS;\r
 \r
+UnmapQueue:\r
+  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
+\r
 ReleaseQueue:\r
-  VirtioRingUninit (&VgpuDev->Ring);\r
+  VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);\r
 \r
 Failed:\r
   //\r
@@ -184,7 +201,128 @@ VirtioGpuUninit (
   // configuration.\r
   //\r
   VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
-  VirtioRingUninit (&VgpuDev->Ring);\r
+  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
+  VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);\r
+}\r
+\r
+/**\r
+  Allocate, zero and map memory, for bus master common buffer operation, to be\r
+  attached as backing store to a host-side VirtIo GPU resource.\r
+\r
+  @param[in]  VgpuDev        The VGPU_DEV object that represents the VirtIo GPU\r
+                             device.\r
+\r
+  @param[in]  NumberOfPages  The number of whole pages to allocate and map.\r
+\r
+  @param[out] HostAddress    The system memory address of the allocated area.\r
+\r
+  @param[out] DeviceAddress  The bus master device address of the allocated\r
+                             area. The VirtIo GPU device may be programmed to\r
+                             access the allocated area through DeviceAddress;\r
+                             DeviceAddress is to be passed to the\r
+                             VirtioGpuResourceAttachBacking() function, as the\r
+                             BackingStoreDeviceAddress parameter.\r
+\r
+  @param[out] Mapping        A resulting token to pass to\r
+                             VirtioGpuUnmapAndFreeBackingStore().\r
+\r
+  @retval EFI_SUCCESS  The requested number of pages has been allocated, zeroed\r
+                       and mapped.\r
+\r
+  @return              Status codes propagated from\r
+                       VgpuDev->VirtIo->AllocateSharedPages() and\r
+                       VirtioMapAllBytesInSharedBuffer().\r
+**/\r
+EFI_STATUS\r
+VirtioGpuAllocateZeroAndMapBackingStore (\r
+  IN  VGPU_DEV             *VgpuDev,\r
+  IN  UINTN                NumberOfPages,\r
+  OUT VOID                 **HostAddress,\r
+  OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,\r
+  OUT VOID                 **Mapping\r
+  )\r
+{\r
+  EFI_STATUS Status;\r
+  VOID       *NewHostAddress;\r
+\r
+  Status = VgpuDev->VirtIo->AllocateSharedPages (\r
+                              VgpuDev->VirtIo,\r
+                              NumberOfPages,\r
+                              &NewHostAddress\r
+                              );\r
+  if (EFI_ERROR (Status)) {\r
+    return Status;\r
+  }\r
+\r
+  //\r
+  // Avoid exposing stale data to the device even temporarily: zero the area\r
+  // before mapping it.\r
+  //\r
+  ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));\r
+\r
+  Status = VirtioMapAllBytesInSharedBuffer (\r
+             VgpuDev->VirtIo,                      // VirtIo\r
+             VirtioOperationBusMasterCommonBuffer, // Operation\r
+             NewHostAddress,                       // HostAddress\r
+             EFI_PAGES_TO_SIZE (NumberOfPages),    // NumberOfBytes\r
+             DeviceAddress,                        // DeviceAddress\r
+             Mapping                               // Mapping\r
+             );\r
+  if (EFI_ERROR (Status)) {\r
+    goto FreeSharedPages;\r
+  }\r
+\r
+  *HostAddress = NewHostAddress;\r
+  return EFI_SUCCESS;\r
+\r
+FreeSharedPages:\r
+  VgpuDev->VirtIo->FreeSharedPages (\r
+                     VgpuDev->VirtIo,\r
+                     NumberOfPages,\r
+                     NewHostAddress\r
+                     );\r
+  return Status;\r
+}\r
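
A possible caller-side sketch of this helper (FrameBufferSize, BackingStore, BackingStoreDeviceAddress and BackingStoreMap are hypothetical names not defined in this file; EFI_SIZE_TO_PAGES() rounds the byte count up to whole pages):

  EFI_STATUS           Status;
  UINTN                NumberOfPages;
  VOID                 *BackingStore;
  EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress;
  VOID                 *BackingStoreMap;

  //
  // Allocate, zero and map a whole-page area large enough for the
  // (hypothetical) FrameBufferSize bytes; BackingStoreDeviceAddress is what
  // the VirtIo GPU device will later be programmed with.
  //
  NumberOfPages = EFI_SIZE_TO_PAGES (FrameBufferSize);
  Status = VirtioGpuAllocateZeroAndMapBackingStore (
             VgpuDev,
             NumberOfPages,
             &BackingStore,
             &BackingStoreDeviceAddress,
             &BackingStoreMap
             );
  if (EFI_ERROR (Status)) {
    return Status;
  }
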
+\r
+/**\r
+  Unmap and free memory originally allocated and mapped with\r
+  VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+  If the memory allocated and mapped with\r
+  VirtioGpuAllocateZeroAndMapBackingStore() was attached to a host-side VirtIo\r
+  GPU resource with VirtioGpuResourceAttachBacking(), then the caller is\r
+  responsible for detaching the backing store from the same resource, with\r
+  VirtioGpuResourceDetachBacking(), before calling this function.\r
+\r
+  @param[in] VgpuDev        The VGPU_DEV object that represents the VirtIo GPU\r
+                            device.\r
+\r
+  @param[in] NumberOfPages  The NumberOfPages parameter originally passed to\r
+                            VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+  @param[in] HostAddress    The HostAddress value originally output by\r
+                            VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+  @param[in] Mapping        The token that was originally output by\r
+                            VirtioGpuAllocateZeroAndMapBackingStore().\r
+**/\r
+VOID\r
+VirtioGpuUnmapAndFreeBackingStore (\r
+  IN VGPU_DEV *VgpuDev,\r
+  IN UINTN    NumberOfPages,\r
+  IN VOID     *HostAddress,\r
+  IN VOID     *Mapping\r
+  )\r
+{\r
+  VgpuDev->VirtIo->UnmapSharedBuffer (\r
+                     VgpuDev->VirtIo,\r
+                     Mapping\r
+                     );\r
+  VgpuDev->VirtIo->FreeSharedPages (\r
+                     VgpuDev->VirtIo,\r
+                     NumberOfPages,\r
+                     HostAddress\r
+                     );\r
 }\r
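
Conversely, a sketch of the teardown order required by the comment above, reusing the hypothetical names from the previous sketch (ResourceId is likewise hypothetical): detach the backing store from its resource first, so the fenced detach guarantees the host has dropped all references to the pages, then unmap and free the area.

  Status = VirtioGpuResourceDetachBacking (VgpuDev, ResourceId);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  VirtioGpuUnmapAndFreeBackingStore (
    VgpuDev,
    NumberOfPages,
    BackingStore,
    BackingStoreMap
    );
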
 \r
 /**\r
@@ -209,6 +347,421 @@ VirtioGpuExitBoot (
 {\r
   VGPU_DEV *VgpuDev;\r
 \r
+  DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));\r
   VgpuDev = Context;\r
   VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
 }\r
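
For context, a sketch of how an EXIT_BOOT_SERVICES notification function like this is typically registered; the actual registration for VirtioGpuExitBoot() lives outside this file, and ExitBootEvent is a hypothetical local here:

  EFI_STATUS Status;
  EFI_EVENT  ExitBootEvent;

  //
  // Ask the boot services to run VirtioGpuExitBoot() with VgpuDev as Context
  // when ExitBootServices() is called, so the device is reset before the OS
  // takes ownership of the platform.
  //
  Status = gBS->CreateEvent (
                  EVT_SIGNAL_EXIT_BOOT_SERVICES,
                  TPL_CALLBACK,
                  VirtioGpuExitBoot,
                  VgpuDev,
                  &ExitBootEvent
                  );
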
+\r
+/**\r
+  Internal utility function that sends a request to the VirtIo GPU device\r
+  model, awaits the answer from the host, and returns a status.\r
+\r
+  @param[in,out] VgpuDev  The VGPU_DEV object that represents the VirtIo GPU\r
+                          device. The caller is responsible for having\r
+                          successfully invoked VirtioGpuInit() on VgpuDev\r
+                          previously, while VirtioGpuUninit() must not have\r
+                          been called on VgpuDev.\r
+\r
+  @param[in] RequestType  The type of the request. The caller is responsible\r
+                          for providing a VirtioGpuCmd* RequestType which, on\r
+                          success, elicits a VirtioGpuRespOkNodata response\r
+                          from the host.\r
+\r
+  @param[in] Fence        Whether to enable fencing for this request. Fencing\r
+                          forces the host to complete the command before\r
+                          producing a response. If Fence is TRUE, then\r
+                          VgpuDev->FenceId is consumed, and incremented.\r
+\r
+  @param[in,out] Header   Pointer to the caller-allocated request object. The\r
+                          request must start with VIRTIO_GPU_CONTROL_HEADER.\r
+                          This function overwrites all fields of Header before\r
+                          submitting the request to the host:\r
+\r
+                          - it sets Type from RequestType,\r
+\r
+                          - it sets Flags and FenceId based on Fence,\r
+\r
+                          - it zeroes CtxId and Padding.\r
+\r
+  @param[in] RequestSize  Size of the entire caller-allocated request object,\r
+                          including the leading VIRTIO_GPU_CONTROL_HEADER.\r
+\r
+  @retval EFI_SUCCESS            Operation successful.\r
+\r
+  @retval EFI_DEVICE_ERROR       The host rejected the request. The host error\r
+                                 code has been logged on the DEBUG_ERROR level.\r
+\r
+  @return                        Codes for unexpected errors in VirtIo\r
+                                 messaging, or request/response\r
+                                 mapping/unmapping.\r
+**/\r
+STATIC\r
+EFI_STATUS\r
+VirtioGpuSendCommand (\r
+  IN OUT VGPU_DEV                           *VgpuDev,\r
+  IN     VIRTIO_GPU_CONTROL_TYPE            RequestType,\r
+  IN     BOOLEAN                            Fence,\r
+  IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
+  IN     UINTN                              RequestSize\r
+  )\r
+{\r
+  DESC_INDICES                       Indices;\r
+  volatile VIRTIO_GPU_CONTROL_HEADER Response;\r
+  EFI_STATUS                         Status;\r
+  UINT32                             ResponseSize;\r
+  EFI_PHYSICAL_ADDRESS               RequestDeviceAddress;\r
+  VOID                               *RequestMap;\r
+  EFI_PHYSICAL_ADDRESS               ResponseDeviceAddress;\r
+  VOID                               *ResponseMap;\r
+\r
+  //\r
+  // Initialize Header.\r
+  //\r
+  Header->Type      = RequestType;\r
+  if (Fence) {\r
+    Header->Flags   = VIRTIO_GPU_FLAG_FENCE;\r
+    Header->FenceId = VgpuDev->FenceId++;\r
+  } else {\r
+    Header->Flags   = 0;\r
+    Header->FenceId = 0;\r
+  }\r
+  Header->CtxId     = 0;\r
+  Header->Padding   = 0;\r
+\r
+  ASSERT (RequestSize >= sizeof *Header);\r
+  ASSERT (RequestSize <= MAX_UINT32);\r
+\r
+  //\r
+  // Map request and response to bus master device addresses.\r
+  //\r
+  Status = VirtioMapAllBytesInSharedBuffer (\r
+             VgpuDev->VirtIo,\r
+             VirtioOperationBusMasterRead,\r
+             (VOID *)Header,\r
+             RequestSize,\r
+             &RequestDeviceAddress,\r
+             &RequestMap\r
+             );\r
+  if (EFI_ERROR (Status)) {\r
+    return Status;\r
+  }\r
+  Status = VirtioMapAllBytesInSharedBuffer (\r
+             VgpuDev->VirtIo,\r
+             VirtioOperationBusMasterWrite,\r
+             (VOID *)&Response,\r
+             sizeof Response,\r
+             &ResponseDeviceAddress,\r
+             &ResponseMap\r
+             );\r
+  if (EFI_ERROR (Status)) {\r
+    goto UnmapRequest;\r
+  }\r
+\r
+  //\r
+  // Compose the descriptor chain.\r
+  //\r
+  VirtioPrepare (&VgpuDev->Ring, &Indices);\r
+  VirtioAppendDesc (\r
+    &VgpuDev->Ring,\r
+    RequestDeviceAddress,\r
+    (UINT32)RequestSize,\r
+    VRING_DESC_F_NEXT,\r
+    &Indices\r
+    );\r
+  VirtioAppendDesc (\r
+    &VgpuDev->Ring,\r
+    ResponseDeviceAddress,\r
+    (UINT32)sizeof Response,\r
+    VRING_DESC_F_WRITE,\r
+    &Indices\r
+    );\r
+\r
+  //\r
+  // Send the command.\r
+  //\r
+  Status = VirtioFlush (VgpuDev->VirtIo, VIRTIO_GPU_CONTROL_QUEUE,\r
+             &VgpuDev->Ring, &Indices, &ResponseSize);\r
+  if (EFI_ERROR (Status)) {\r
+    goto UnmapResponse;\r
+  }\r
+\r
+  //\r
+  // Verify response size.\r
+  //\r
+  if (ResponseSize != sizeof Response) {\r
+    DEBUG ((DEBUG_ERROR, "%a: malformed response to Request=0x%x\n",\r
+      __FUNCTION__, (UINT32)RequestType));\r
+    Status = EFI_PROTOCOL_ERROR;\r
+    goto UnmapResponse;\r
+  }\r
+\r
+  //\r
+  // Unmap response and request, in reverse order of mapping. On error, the\r
+  // respective mapping is invalidated anyway, only the data may not have been\r
+  // committed to system memory (in case of VirtioOperationBusMasterWrite).\r
+  //\r
+  Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+  if (EFI_ERROR (Status)) {\r
+    goto UnmapRequest;\r
+  }\r
+  Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+  if (EFI_ERROR (Status)) {\r
+    return Status;\r
+  }\r
+\r
+  //\r
+  // Parse the response.\r
+  //\r
+  if (Response.Type == VirtioGpuRespOkNodata) {\r
+    return EFI_SUCCESS;\r
+  }\r
+\r
+  DEBUG ((DEBUG_ERROR, "%a: Request=0x%x Response=0x%x\n", __FUNCTION__,\r
+    (UINT32)RequestType, Response.Type));\r
+  return EFI_DEVICE_ERROR;\r
+\r
+UnmapResponse:\r
+  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+\r
+UnmapRequest:\r
+  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+\r
+  return Status;\r
+}\r
+\r
+/**\r
+  The following functions send requests to the VirtIo GPU device model, await\r
+  the answer from the host, and return a status. They share the following\r
+  interface details:\r
+\r
+  @param[in,out] VgpuDev  The VGPU_DEV object that represents the VirtIo GPU\r
+                          device. The caller is responsible for having\r
+                          successfully invoked VirtioGpuInit() on VgpuDev\r
+                          previously, while VirtioGpuUninit() must not have\r
+                          been called on VgpuDev.\r
+\r
+  @retval EFI_INVALID_PARAMETER  Invalid command-specific parameters were\r
+                                 detected by this driver.\r
+\r
+  @retval EFI_SUCCESS            Operation successful.\r
+\r
+  @retval EFI_DEVICE_ERROR       The host rejected the request. The host error\r
+                                 code has been logged on the DEBUG_ERROR level.\r
+\r
+  @return                        Codes for unexpected errors in VirtIo\r
+                                 messaging.\r
+\r
+  For the command-specific parameters, please consult the GPU Device section of\r
+  the VirtIo 1.0 specification (see references in\r
+  "OvmfPkg/Include/IndustryStandard/VirtioGpu.h").\r
+**/\r
+EFI_STATUS\r
+VirtioGpuResourceCreate2d (\r
+  IN OUT VGPU_DEV           *VgpuDev,\r
+  IN     UINT32             ResourceId,\r
+  IN     VIRTIO_GPU_FORMATS Format,\r
+  IN     UINT32             Width,\r
+  IN     UINT32             Height\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.ResourceId = ResourceId;\r
+  Request.Format     = (UINT32)Format;\r
+  Request.Width      = Width;\r
+  Request.Height     = Height;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdResourceCreate2d,\r
+           FALSE,                        // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuResourceUnref (\r
+  IN OUT VGPU_DEV *VgpuDev,\r
+  IN     UINT32   ResourceId\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_RESOURCE_UNREF Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.ResourceId = ResourceId;\r
+  Request.Padding    = 0;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdResourceUnref,\r
+           FALSE,                     // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuResourceAttachBacking (\r
+  IN OUT VGPU_DEV             *VgpuDev,\r
+  IN     UINT32               ResourceId,\r
+  IN     EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,\r
+  IN     UINTN                NumberOfPages\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.ResourceId    = ResourceId;\r
+  Request.NrEntries     = 1;\r
+  Request.Entry.Addr    = BackingStoreDeviceAddress;\r
+  Request.Entry.Length  = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);\r
+  Request.Entry.Padding = 0;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdResourceAttachBacking,\r
+           FALSE,                             // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuResourceDetachBacking (\r
+  IN OUT VGPU_DEV *VgpuDev,\r
+  IN     UINT32   ResourceId\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.ResourceId = ResourceId;\r
+  Request.Padding    = 0;\r
+\r
+  //\r
+  // In this case, we set Fence to TRUE, because after this function returns,\r
+  // the caller might reasonably want to repurpose the backing pages\r
+  // immediately. Thus we should ensure that the host releases all references\r
+  // to the backing pages before we return.\r
+  //\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdResourceDetachBacking,\r
+           TRUE,                              // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuSetScanout (\r
+  IN OUT VGPU_DEV *VgpuDev,\r
+  IN     UINT32   X,\r
+  IN     UINT32   Y,\r
+  IN     UINT32   Width,\r
+  IN     UINT32   Height,\r
+  IN     UINT32   ScanoutId,\r
+  IN     UINT32   ResourceId\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_SET_SCANOUT Request;\r
+\r
+  //\r
+  // Unlike for most other commands, ResourceId=0 is valid; it\r
+  // is used to disable a scanout.\r
+  //\r
+  Request.Rectangle.X      = X;\r
+  Request.Rectangle.Y      = Y;\r
+  Request.Rectangle.Width  = Width;\r
+  Request.Rectangle.Height = Height;\r
+  Request.ScanoutId        = ScanoutId;\r
+  Request.ResourceId       = ResourceId;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdSetScanout,\r
+           FALSE,                  // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuTransferToHost2d (\r
+  IN OUT VGPU_DEV *VgpuDev,\r
+  IN     UINT32   X,\r
+  IN     UINT32   Y,\r
+  IN     UINT32   Width,\r
+  IN     UINT32   Height,\r
+  IN     UINT64   Offset,\r
+  IN     UINT32   ResourceId\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.Rectangle.X      = X;\r
+  Request.Rectangle.Y      = Y;\r
+  Request.Rectangle.Width  = Width;\r
+  Request.Rectangle.Height = Height;\r
+  Request.Offset           = Offset;\r
+  Request.ResourceId       = ResourceId;\r
+  Request.Padding          = 0;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdTransferToHost2d,\r
+           FALSE,                        // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
+\r
+EFI_STATUS\r
+VirtioGpuResourceFlush (\r
+  IN OUT VGPU_DEV *VgpuDev,\r
+  IN     UINT32   X,\r
+  IN     UINT32   Y,\r
+  IN     UINT32   Width,\r
+  IN     UINT32   Height,\r
+  IN     UINT32   ResourceId\r
+  )\r
+{\r
+  volatile VIRTIO_GPU_RESOURCE_FLUSH Request;\r
+\r
+  if (ResourceId == 0) {\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  Request.Rectangle.X      = X;\r
+  Request.Rectangle.Y      = Y;\r
+  Request.Rectangle.Width  = Width;\r
+  Request.Rectangle.Height = Height;\r
+  Request.ResourceId       = ResourceId;\r
+  Request.Padding          = 0;\r
+\r
+  return VirtioGpuSendCommand (\r
+           VgpuDev,\r
+           VirtioGpuCmdResourceFlush,\r
+           FALSE,                     // Fence\r
+           &Request.Header,\r
+           sizeof Request\r
+           );\r
+}\r
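
Taken together, the wrappers above support the usual virtio-gpu 2D bring-up sequence: create a host-side resource, attach guest memory as its backing store, point a scanout at it, then transfer and flush after drawing. A hedged end-to-end sketch (ExampleBringUpDisplay is a hypothetical helper, ResourceId 1 and ScanoutId 0 are arbitrary choices, and VirtioGpuFormatB8G8R8X8Unorm is assumed from "OvmfPkg/Include/IndustryStandard/VirtioGpu.h"):

  STATIC
  EFI_STATUS
  ExampleBringUpDisplay (
    IN OUT VGPU_DEV             *VgpuDev,
    IN     UINT32               Width,
    IN     UINT32               Height,
    IN     EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,
    IN     UINTN                NumberOfPages
    )
  {
    EFI_STATUS Status;

    //
    // 1. Create a host-side 2D resource with ResourceId 1.
    //
    Status = VirtioGpuResourceCreate2d (VgpuDev, 1,
               VirtioGpuFormatB8G8R8X8Unorm, Width, Height);
    if (EFI_ERROR (Status)) {
      return Status;
    }

    //
    // 2. Attach the previously allocated and mapped backing store.
    //
    Status = VirtioGpuResourceAttachBacking (VgpuDev, 1,
               BackingStoreDeviceAddress, NumberOfPages);
    if (EFI_ERROR (Status)) {
      return Status;
    }

    //
    // 3. Point scanout #0 at the resource.
    //
    Status = VirtioGpuSetScanout (VgpuDev, 0, 0, Width, Height, 0, 1);
    if (EFI_ERROR (Status)) {
      return Status;
    }

    //
    // 4. After the guest draws into the backing store, publish the pixels.
    //
    Status = VirtioGpuTransferToHost2d (VgpuDev, 0, 0, Width, Height, 0, 1);
    if (EFI_ERROR (Status)) {
      return Status;
    }
    return VirtioGpuResourceFlush (VgpuDev, 0, 0, Width, Height, 1);
  }
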