Copyright (C) 2016, Red Hat, Inc.\r
Copyright (c) 2017, AMD Inc, All rights reserved.<BR>\r
\r
- This program and the accompanying materials are licensed and made available\r
- under the terms and conditions of the BSD License which accompanies this\r
- distribution. The full text of the license may be found at\r
- http://opensource.org/licenses/bsd-license.php\r
-\r
- THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT\r
- WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
**/\r
EFI_STATUS\r
VirtioGpuInit (\r
- IN OUT VGPU_DEV *VgpuDev\r
+ IN OUT VGPU_DEV *VgpuDev\r
)\r
{\r
- UINT8 NextDevStat;\r
- EFI_STATUS Status;\r
- UINT64 Features;\r
- UINT16 QueueSize;\r
- UINT64 RingBaseShift;\r
+ UINT8 NextDevStat;\r
+ EFI_STATUS Status;\r
+ UINT64 Features;\r
+ UINT16 QueueSize;\r
+ UINT64 RingBaseShift;\r
\r
//\r
// Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device\r
// 1. Reset the device.\r
//\r
NextDevStat = 0;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
// 2. Set the ACKNOWLEDGE status bit [...]\r
//\r
NextDevStat |= VSTAT_ACK;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
// 3. Set the DRIVER status bit [...]\r
//\r
NextDevStat |= VSTAT_DRIVER;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
if ((Features & VIRTIO_F_VERSION_1) == 0) {\r
Status = EFI_UNSUPPORTED;\r
goto Failed;\r
}\r
+\r
//\r
// We only want the most basic 2D features.\r
//\r
- Features &= VIRTIO_F_VERSION_1;\r
+ Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;\r
\r
//\r
// ... and write the subset of feature bits understood by the [...] driver to\r
// 7. Perform device-specific setup, including discovery of virtqueues for\r
// the device [...]\r
//\r
- Status = VgpuDev->VirtIo->SetQueueSel (VgpuDev->VirtIo,\r
- VIRTIO_GPU_CONTROL_QUEUE);\r
+ Status = VgpuDev->VirtIo->SetQueueSel (\r
+ VgpuDev->VirtIo,\r
+ VIRTIO_GPU_CONTROL_QUEUE\r
+ );\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
Status = VgpuDev->VirtIo->GetQueueNumMax (VgpuDev->VirtIo, &QueueSize);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
//\r
// If anything fails from here on, we have to release the ring.\r
//\r
if (EFI_ERROR (Status)) {\r
goto ReleaseQueue;\r
}\r
+\r
//\r
// If anything fails from here on, we have to unmap the ring.\r
//\r
// 8. Set the DRIVER_OK status bit.\r
//\r
NextDevStat |= VSTAT_DRIVER_OK;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto UnmapQueue;\r
}\r
**/\r
VOID\r
VirtioGpuUninit (\r
- IN OUT VGPU_DEV *VgpuDev\r
+ IN OUT VGPU_DEV *VgpuDev\r
)\r
{\r
//\r
VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);\r
}\r
\r
+/**\r
+ Allocate, zero and map memory, for bus master common buffer operation, to be\r
+ attached as backing store to a host-side VirtIo GPU resource.\r
+\r
+ @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU\r
+ device.\r
+\r
+ @param[in] NumberOfPages The number of whole pages to allocate and map.\r
+\r
+ @param[out] HostAddress The system memory address of the allocated area.\r
+\r
+ @param[out] DeviceAddress The bus master device address of the allocated\r
+ area. The VirtIo GPU device may be programmed to\r
+ access the allocated area through DeviceAddress;\r
+ DeviceAddress is to be passed to the\r
+ VirtioGpuResourceAttachBacking() function, as the\r
+ BackingStoreDeviceAddress parameter.\r
+\r
+ @param[out] Mapping A resulting token to pass to\r
+ VirtioGpuUnmapAndFreeBackingStore().\r
+\r
+ @retval EFI_SUCCESS The requested number of pages has been allocated, zeroed\r
+ and mapped.\r
+\r
+ @return Status codes propagated from\r
+ VgpuDev->VirtIo->AllocateSharedPages() and\r
+ VirtioMapAllBytesInSharedBuffer().\r
+**/\r
+EFI_STATUS\r
+VirtioGpuAllocateZeroAndMapBackingStore (\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ OUT VOID **HostAddress,\r
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,\r
+ OUT VOID **Mapping\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ VOID *NewHostAddress;\r
+\r
+ Status = VgpuDev->VirtIo->AllocateSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ &NewHostAddress\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ //\r
+ // Avoid exposing stale data to the device even temporarily: zero the area\r
+ // before mapping it.\r
+ //\r
+ ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));\r
+\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo, // VirtIo\r
+ VirtioOperationBusMasterCommonBuffer, // Operation\r
+ NewHostAddress, // HostAddress\r
+ EFI_PAGES_TO_SIZE (NumberOfPages), // NumberOfBytes\r
+ DeviceAddress, // DeviceAddress\r
+ Mapping // Mapping\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto FreeSharedPages;\r
+ }\r
+\r
+ //\r
+ // Full success: report HostAddress last; DeviceAddress and Mapping have\r
+ // already been set by VirtioMapAllBytesInSharedBuffer() above.\r
+ //\r
+ *HostAddress = NewHostAddress;\r
+ return EFI_SUCCESS;\r
+\r
+FreeSharedPages:\r
+ //\r
+ // Error path: release the pages allocated above; Status still holds the\r
+ // failure code from VirtioMapAllBytesInSharedBuffer().\r
+ //\r
+ VgpuDev->VirtIo->FreeSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ NewHostAddress\r
+ );\r
+ return Status;\r
+}\r
+\r
+/**\r
+ Unmap and free memory originally allocated and mapped with\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ If the memory allocated and mapped with\r
+ VirtioGpuAllocateZeroAndMapBackingStore() was attached to a host-side VirtIo\r
+ GPU resource with VirtioGpuResourceAttachBacking(), then the caller is\r
+ responsible for detaching the backing store from the same resource, with\r
+ VirtioGpuResourceDetachBacking(), before calling this function.\r
+\r
+ @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU\r
+ device.\r
+\r
+ @param[in] NumberOfPages The NumberOfPages parameter originally passed to\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ @param[in] HostAddress The HostAddress value originally output by\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ @param[in] Mapping The token that was originally output by\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+**/\r
+VOID\r
+VirtioGpuUnmapAndFreeBackingStore (\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ IN VOID *HostAddress,\r
+ IN VOID *Mapping\r
+ )\r
+{\r
+ //\r
+ // The return values of UnmapSharedBuffer() and FreeSharedPages() are not\r
+ // checked: this function is VOID, so a failure could not be propagated.\r
+ //\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ Mapping\r
+ );\r
+ //\r
+ // Free the pages only after the mapping that covers them has been undone.\r
+ //\r
+ VgpuDev->VirtIo->FreeSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ HostAddress\r
+ );\r
+}\r
+\r
/**\r
EFI_EVENT_NOTIFY function for the VGPU_DEV.ExitBoot event. It resets the\r
VirtIo device, causing it to release its resources and to forget its\r
VOID\r
EFIAPI\r
VirtioGpuExitBoot (\r
- IN EFI_EVENT Event,\r
- IN VOID *Context\r
+ IN EFI_EVENT Event,\r
+ IN VOID *Context\r
)\r
{\r
- VGPU_DEV *VgpuDev;\r
+ VGPU_DEV *VgpuDev;\r
\r
+ DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));\r
VgpuDev = Context;\r
+ //\r
+ // Reset the device; per this function's doc comment, that makes the VirtIo\r
+ // device release its resources and forget its configuration. Event itself\r
+ // is not used; the notification context carries the VGPU_DEV.\r
+ //\r
VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
- VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
}\r
\r
/**\r
@param[in] RequestSize Size of the entire caller-allocated request object,\r
including the leading VIRTIO_GPU_CONTROL_HEADER.\r
\r
+ @param[in] ResponseType The type of the response (VirtioGpuResp*).\r
+\r
+ @param[in,out] Response Pointer to the caller-allocated response object. The\r
+ request must start with VIRTIO_GPU_CONTROL_HEADER.\r
+\r
+ @param[in] ResponseSize Size of the entire caller-allocated response object,\r
+ including the leading VIRTIO_GPU_CONTROL_HEADER.\r
+\r
@retval EFI_SUCCESS Operation successful.\r
\r
@retval EFI_DEVICE_ERROR The host rejected the request. The host error\r
- code has been logged on the EFI_D_ERROR level.\r
+ code has been logged on the DEBUG_ERROR level.\r
\r
@return Codes for unexpected errors in VirtIo\r
- messaging.\r
+ messaging, or request/response\r
+ mapping/unmapping.\r
**/\r
STATIC\r
EFI_STATUS\r
-VirtioGpuSendCommand (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN VIRTIO_GPU_CONTROL_TYPE RequestType,\r
- IN BOOLEAN Fence,\r
- IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
- IN UINTN RequestSize\r
+VirtioGpuSendCommandWithReply (\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN VIRTIO_GPU_CONTROL_TYPE RequestType,\r
+ IN BOOLEAN Fence,\r
+ IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
+ IN UINTN RequestSize,\r
+ IN VIRTIO_GPU_CONTROL_TYPE ResponseType,\r
+ IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Response,\r
+ IN UINTN ResponseSize\r
)\r
{\r
- DESC_INDICES Indices;\r
- volatile VIRTIO_GPU_CONTROL_HEADER Response;\r
- EFI_STATUS Status;\r
- UINT32 ResponseSize;\r
+ DESC_INDICES Indices;\r
+ EFI_STATUS Status;\r
+ UINT32 ResponseSizeRet;\r
+ EFI_PHYSICAL_ADDRESS RequestDeviceAddress;\r
+ VOID *RequestMap;\r
+ EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;\r
+ VOID *ResponseMap;\r
\r
//\r
// Initialize Header.\r
//\r
- Header->Type = RequestType;\r
+ Header->Type = RequestType;\r
if (Fence) {\r
Header->Flags = VIRTIO_GPU_FLAG_FENCE;\r
Header->FenceId = VgpuDev->FenceId++;\r
Header->Flags = 0;\r
Header->FenceId = 0;\r
}\r
- Header->CtxId = 0;\r
- Header->Padding = 0;\r
+\r
+ Header->CtxId = 0;\r
+ Header->Padding = 0;\r
\r
ASSERT (RequestSize >= sizeof *Header);\r
ASSERT (RequestSize <= MAX_UINT32);\r
\r
+ //\r
+ // Map request and response to bus master device addresses.\r
+ //\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ VirtioOperationBusMasterRead,\r
+ (VOID *)Header,\r
+ RequestSize,\r
+ &RequestDeviceAddress,\r
+ &RequestMap\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ VirtioOperationBusMasterWrite,\r
+ (VOID *)Response,\r
+ ResponseSize,\r
+ &ResponseDeviceAddress,\r
+ &ResponseMap\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapRequest;\r
+ }\r
+\r
//\r
// Compose the descriptor chain.\r
//\r
VirtioPrepare (&VgpuDev->Ring, &Indices);\r
- VirtioAppendDesc (&VgpuDev->Ring, (UINTN)Header, (UINT32)RequestSize,\r
- VRING_DESC_F_NEXT, &Indices);\r
- VirtioAppendDesc (&VgpuDev->Ring, (UINTN)&Response, sizeof Response,\r
- VRING_DESC_F_WRITE, &Indices);\r
+ VirtioAppendDesc (\r
+ &VgpuDev->Ring,\r
+ RequestDeviceAddress,\r
+ (UINT32)RequestSize,\r
+ VRING_DESC_F_NEXT,\r
+ &Indices\r
+ );\r
+ VirtioAppendDesc (\r
+ &VgpuDev->Ring,\r
+ ResponseDeviceAddress,\r
+ (UINT32)ResponseSize,\r
+ VRING_DESC_F_WRITE,\r
+ &Indices\r
+ );\r
\r
//\r
// Send the command.\r
//\r
- Status = VirtioFlush (VgpuDev->VirtIo, VIRTIO_GPU_CONTROL_QUEUE,\r
- &VgpuDev->Ring, &Indices, &ResponseSize);\r
+ Status = VirtioFlush (\r
+ VgpuDev->VirtIo,\r
+ VIRTIO_GPU_CONTROL_QUEUE,\r
+ &VgpuDev->Ring,\r
+ &Indices,\r
+ &ResponseSizeRet\r
+ );\r
if (EFI_ERROR (Status)) {\r
- return Status;\r
+ goto UnmapResponse;\r
}\r
\r
//\r
- // Parse the response.\r
+ // Verify response size.\r
+ //\r
+ if (ResponseSize != ResponseSizeRet) {\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "%a: malformed response to Request=0x%x\n",\r
+ __FUNCTION__,\r
+ (UINT32)RequestType\r
+ ));\r
+ Status = EFI_PROTOCOL_ERROR;\r
+ goto UnmapResponse;\r
+ }\r
+\r
+ //\r
+ // Unmap response and request, in reverse order of mapping. On error, the\r
+ // respective mapping is invalidated anyway, only the data may not have been\r
+ // committed to system memory (in case of VirtioOperationBusMasterWrite).\r
//\r
- if (ResponseSize != sizeof Response) {\r
- DEBUG ((EFI_D_ERROR, "%a: malformed response to Request=0x%x\n",\r
- __FUNCTION__, (UINT32)RequestType));\r
- return EFI_PROTOCOL_ERROR;\r
+ Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapRequest;\r
+ }\r
+\r
+ Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
}\r
\r
- if (Response.Type == VirtioGpuRespOkNodata) {\r
+ //\r
+ // Parse the response.\r
+ //\r
+ if (Response->Type == (UINT32)ResponseType) {\r
return EFI_SUCCESS;\r
}\r
\r
- DEBUG ((EFI_D_ERROR, "%a: Request=0x%x Response=0x%x\n", __FUNCTION__,\r
- (UINT32)RequestType, Response.Type));\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "%a: Request=0x%x Response=0x%x (expected 0x%x)\n",\r
+ __FUNCTION__,\r
+ (UINT32)RequestType,\r
+ Response->Type,\r
+ ResponseType\r
+ ));\r
return EFI_DEVICE_ERROR;\r
+\r
+UnmapResponse:\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+\r
+UnmapRequest:\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+\r
+ return Status;\r
+}\r
+\r
+/**\r
+ Simplified version of VirtioGpuSendCommandWithReply() for commands\r
+ which do not send back any data.\r
+\r
+ The response is received into a local VIRTIO_GPU_CONTROL_HEADER, checked\r
+ against VirtioGpuRespOkNodata by VirtioGpuSendCommandWithReply(), and then\r
+ discarded. All parameters are forwarded unchanged.\r
+**/\r
+STATIC\r
+EFI_STATUS\r
+VirtioGpuSendCommand (\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN VIRTIO_GPU_CONTROL_TYPE RequestType,\r
+ IN BOOLEAN Fence,\r
+ IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
+ IN UINTN RequestSize\r
+ )\r
+{\r
+ volatile VIRTIO_GPU_CONTROL_HEADER Response;\r
+\r
+ return VirtioGpuSendCommandWithReply (\r
+ VgpuDev,\r
+ RequestType,\r
+ Fence,\r
+ Header,\r
+ RequestSize,\r
+ VirtioGpuRespOkNodata,\r
+ &Response,\r
+ sizeof (Response)\r
+ );\r
+}\r
\r
/**\r
@retval EFI_SUCCESS Operation successful.\r
\r
@retval EFI_DEVICE_ERROR The host rejected the request. The host error\r
- code has been logged on the EFI_D_ERROR level.\r
+ code has been logged on the DEBUG_ERROR level.\r
\r
@return Codes for unexpected errors in VirtIo\r
messaging.\r
**/\r
EFI_STATUS\r
VirtioGpuResourceCreate2d (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId,\r
- IN VIRTIO_GPU_FORMATS Format,\r
- IN UINT32 Width,\r
- IN UINT32 Height\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId,\r
+ IN VIRTIO_GPU_FORMATS Format,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;\r
+ volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceUnref (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_UNREF Request;\r
+ volatile VIRTIO_GPU_RESOURCE_UNREF Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceAttachBacking (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId,\r
- IN VOID *FirstBackingPage,\r
- IN UINTN NumberOfPages\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId,\r
+ IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,\r
+ IN UINTN NumberOfPages\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
+ volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
Request.ResourceId = ResourceId;\r
Request.NrEntries = 1;\r
- Request.Entry.Addr = (UINTN)FirstBackingPage;\r
+ Request.Entry.Addr = BackingStoreDeviceAddress;\r
Request.Entry.Length = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);\r
Request.Entry.Padding = 0;\r
\r
\r
EFI_STATUS\r
VirtioGpuResourceDetachBacking (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;\r
+ volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuSetScanout (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT32 ScanoutId,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT32 ScanoutId,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_SET_SCANOUT Request;\r
+ volatile VIRTIO_GPU_SET_SCANOUT Request;\r
\r
//\r
// Unlike for most other commands, ResourceId=0 is valid; it\r
\r
EFI_STATUS\r
VirtioGpuTransferToHost2d (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT64 Offset,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT64 Offset,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;\r
+ volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceFlush (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_FLUSH Request;\r
+ volatile VIRTIO_GPU_RESOURCE_FLUSH Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
sizeof Request\r
);\r
}\r
+\r
+/**\r
+ Query the host for display information.\r
+\r
+ Send a VirtioGpuCmdGetDisplayInfo command (without fence) to the host, and\r
+ expect a response of type VirtioGpuRespOkDisplayInfo, received into the\r
+ caller-allocated object starting at Response->Header.\r
+\r
+ @param[in,out] VgpuDev The VGPU_DEV object that represents the VirtIo GPU\r
+ device.\r
+\r
+ @param[out] Response The caller-allocated VIRTIO_GPU_RESP_DISPLAY_INFO\r
+ object that the host's reply is written into.\r
+\r
+ @retval EFI_SUCCESS Operation successful.\r
+\r
+ @return Status codes propagated from VirtioGpuSendCommandWithReply().\r
+**/\r
+EFI_STATUS\r
+VirtioGpuGetDisplayInfo (\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ volatile VIRTIO_GPU_RESP_DISPLAY_INFO *Response\r
+ )\r
+{\r
+ volatile VIRTIO_GPU_CONTROL_HEADER Request;\r
+\r
+ return VirtioGpuSendCommandWithReply (\r
+ VgpuDev,\r
+ VirtioGpuCmdGetDisplayInfo,\r
+ FALSE, // Fence\r
+ &Request,\r
+ sizeof Request,\r
+ VirtioGpuRespOkDisplayInfo,\r
+ &Response->Header,\r
+ sizeof *Response\r
+ );\r
+}\r