VirtIo GPU initialization, and commands (primitives) for the GPU device.\r
\r
Copyright (C) 2016, Red Hat, Inc.\r
+ Copyright (c) 2017, AMD Inc, All rights reserved.<BR>\r
\r
This program and the accompanying materials are licensed and made available\r
under the terms and conditions of the BSD License which accompanies this\r
EFI_STATUS Status;\r
UINT64 Features;\r
UINT16 QueueSize;\r
+ UINT64 RingBaseShift;\r
\r
//\r
// Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device\r
//\r
// [...] population of virtqueues [...]\r
//\r
- Status = VirtioRingInit (QueueSize, &VgpuDev->Ring);\r
+ Status = VirtioRingInit (VgpuDev->VirtIo, QueueSize, &VgpuDev->Ring);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
- Status = VgpuDev->VirtIo->SetQueueAddress (VgpuDev->VirtIo, &VgpuDev->Ring);\r
+ //\r
+ // If anything fails from here on, we have to release the ring.\r
+ //\r
+ Status = VirtioRingMap (\r
+ VgpuDev->VirtIo,\r
+ &VgpuDev->Ring,\r
+ &RingBaseShift,\r
+ &VgpuDev->RingMap\r
+ );\r
if (EFI_ERROR (Status)) {\r
goto ReleaseQueue;\r
}\r
+ //\r
+ // If anything fails from here on, we have to unmap the ring.\r
+ //\r
+ Status = VgpuDev->VirtIo->SetQueueAddress (\r
+ VgpuDev->VirtIo,\r
+ &VgpuDev->Ring,\r
+ RingBaseShift\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapQueue;\r
+ }\r
\r
//\r
// 8. Set the DRIVER_OK status bit.\r
NextDevStat |= VSTAT_DRIVER_OK;\r
Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ goto UnmapQueue;\r
}\r
\r
return EFI_SUCCESS;\r
\r
+UnmapQueue:\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
+\r
ReleaseQueue:\r
- VirtioRingUninit (&VgpuDev->Ring);\r
+ VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);\r
\r
Failed:\r
//\r
// configuration.\r
//\r
VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
- VirtioRingUninit (&VgpuDev->Ring);\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
+ VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);\r
+}\r
+\r
+/**\r
+ Allocate, zero and map memory, for bus master common buffer operation, to be\r
+ attached as backing store to a host-side VirtIo GPU resource.\r
+\r
+ @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU\r
+ device.\r
+\r
+ @param[in] NumberOfPages The number of whole pages to allocate and map.\r
+\r
+ @param[out] HostAddress The system memory address of the allocated area.\r
+\r
+ @param[out] DeviceAddress The bus master device address of the allocated\r
+ area. The VirtIo GPU device may be programmed to\r
+ access the allocated area through DeviceAddress;\r
+ DeviceAddress is to be passed to the\r
+ VirtioGpuResourceAttachBacking() function, as the\r
+ BackingStoreDeviceAddress parameter.\r
+\r
+ @param[out] Mapping A resulting token to pass to\r
+ VirtioGpuUnmapAndFreeBackingStore().\r
+\r
+ @retval EFI_SUCCESS The requested number of pages has been allocated, zeroed\r
+ and mapped.\r
+\r
+ @return Status codes propagated from\r
+ VgpuDev->VirtIo->AllocateSharedPages() and\r
+ VirtioMapAllBytesInSharedBuffer().\r
+**/\r
+EFI_STATUS\r
+VirtioGpuAllocateZeroAndMapBackingStore (\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ OUT VOID **HostAddress,\r
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,\r
+ OUT VOID **Mapping\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ VOID *NewHostAddress;\r
+\r
+ Status = VgpuDev->VirtIo->AllocateSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ &NewHostAddress\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ //\r
+ // Avoid exposing stale data to the device even temporarily: zero the area\r
+ // before mapping it.\r
+ //\r
+ ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));\r
+\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo, // VirtIo\r
+ VirtioOperationBusMasterCommonBuffer, // Operation\r
+ NewHostAddress, // HostAddress\r
+ EFI_PAGES_TO_SIZE (NumberOfPages), // NumberOfBytes\r
+ DeviceAddress, // DeviceAddress\r
+ Mapping // Mapping\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto FreeSharedPages;\r
+ }\r
+\r
+ //\r
+ // Commit the HostAddress output only after both allocation and mapping\r
+ // have succeeded, so the caller never observes a partially set up area.\r
+ //\r
+ *HostAddress = NewHostAddress;\r
+ return EFI_SUCCESS;\r
+\r
+FreeSharedPages:\r
+ //\r
+ // Error path: mapping failed, so release the pages allocated above.\r
+ //\r
+ VgpuDev->VirtIo->FreeSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ NewHostAddress\r
+ );\r
+ return Status;\r
+}\r
+\r
+/**\r
+ Unmap and free memory originally allocated and mapped with\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ If the memory allocated and mapped with\r
+ VirtioGpuAllocateZeroAndMapBackingStore() was attached to a host-side VirtIo\r
+ GPU resource with VirtioGpuResourceAttachBacking(), then the caller is\r
+ responsible for detaching the backing store from the same resource, with\r
+ VirtioGpuResourceDetachBacking(), before calling this function.\r
+\r
+ @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU\r
+ device.\r
+\r
+ @param[in] NumberOfPages The NumberOfPages parameter originally passed to\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ @param[in] HostAddress The HostAddress value originally output by\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+\r
+ @param[in] Mapping The token that was originally output by\r
+ VirtioGpuAllocateZeroAndMapBackingStore().\r
+**/\r
+VOID\r
+VirtioGpuUnmapAndFreeBackingStore (\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ IN VOID *HostAddress,\r
+ IN VOID *Mapping\r
+ )\r
+{\r
+ //\r
+ // Tear down in reverse order of setup: unmap the bus master buffer first,\r
+ // then release the shared pages that backed it.\r
+ //\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ Mapping\r
+ );\r
+ VgpuDev->VirtIo->FreeSharedPages (\r
+ VgpuDev->VirtIo,\r
+ NumberOfPages,\r
+ HostAddress\r
+ );\r
}\r
\r
/**\r
\r
VgpuDev = Context;\r
VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
}\r
\r
/**\r
code has been logged on the EFI_D_ERROR level.\r
\r
@return Codes for unexpected errors in VirtIo\r
- messaging.\r
+ messaging, or request/response\r
+ mapping/unmapping.\r
**/\r
STATIC\r
EFI_STATUS\r
volatile VIRTIO_GPU_CONTROL_HEADER Response;\r
EFI_STATUS Status;\r
UINT32 ResponseSize;\r
+ EFI_PHYSICAL_ADDRESS RequestDeviceAddress;\r
+ VOID *RequestMap;\r
+ EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;\r
+ VOID *ResponseMap;\r
\r
//\r
// Initialize Header.\r
Header->Padding = 0;\r
\r
ASSERT (RequestSize >= sizeof *Header);\r
+ ASSERT (RequestSize <= MAX_UINT32);\r
+\r
+ //\r
+ // Map request and response to bus master device addresses.\r
+ //\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ VirtioOperationBusMasterRead,\r
+ (VOID *)Header,\r
+ RequestSize,\r
+ &RequestDeviceAddress,\r
+ &RequestMap\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ VgpuDev->VirtIo,\r
+ VirtioOperationBusMasterWrite,\r
+ (VOID *)&Response,\r
+ sizeof Response,\r
+ &ResponseDeviceAddress,\r
+ &ResponseMap\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapRequest;\r
+ }\r
\r
//\r
// Compose the descriptor chain.\r
//\r
VirtioPrepare (&VgpuDev->Ring, &Indices);\r
- VirtioAppendDesc (&VgpuDev->Ring, (UINTN)Header, RequestSize,\r
- VRING_DESC_F_NEXT, &Indices);\r
- VirtioAppendDesc (&VgpuDev->Ring, (UINTN)&Response, sizeof Response,\r
- VRING_DESC_F_WRITE, &Indices);\r
+ VirtioAppendDesc (\r
+ &VgpuDev->Ring,\r
+ RequestDeviceAddress,\r
+ (UINT32)RequestSize,\r
+ VRING_DESC_F_NEXT,\r
+ &Indices\r
+ );\r
+ VirtioAppendDesc (\r
+ &VgpuDev->Ring,\r
+ ResponseDeviceAddress,\r
+ (UINT32)sizeof Response,\r
+ VRING_DESC_F_WRITE,\r
+ &Indices\r
+ );\r
\r
//\r
// Send the command.\r
Status = VirtioFlush (VgpuDev->VirtIo, VIRTIO_GPU_CONTROL_QUEUE,\r
&VgpuDev->Ring, &Indices, &ResponseSize);\r
if (EFI_ERROR (Status)) {\r
- return Status;\r
+ goto UnmapResponse;\r
}\r
\r
//\r
- // Parse the response.\r
+ // Verify response size.\r
//\r
if (ResponseSize != sizeof Response) {\r
DEBUG ((EFI_D_ERROR, "%a: malformed response to Request=0x%x\n",\r
__FUNCTION__, (UINT32)RequestType));\r
- return EFI_PROTOCOL_ERROR;\r
+ Status = EFI_PROTOCOL_ERROR;\r
+ goto UnmapResponse;\r
}\r
\r
+ //\r
+ // Unmap response and request, in reverse order of mapping. On error, the\r
+ // respective mapping is invalidated anyway, only the data may not have been\r
+ // committed to system memory (in case of VirtioOperationBusMasterWrite).\r
+ //\r
+ Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapRequest;\r
+ }\r
+ Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ //\r
+ // Parse the response.\r
+ //\r
if (Response.Type == VirtioGpuRespOkNodata) {\r
return EFI_SUCCESS;\r
}\r
DEBUG ((EFI_D_ERROR, "%a: Request=0x%x Response=0x%x\n", __FUNCTION__,\r
(UINT32)RequestType, Response.Type));\r
return EFI_DEVICE_ERROR;\r
+\r
+UnmapResponse:\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);\r
+\r
+UnmapRequest:\r
+ VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
+\r
+ return Status;\r
}\r
\r
/**\r
\r
EFI_STATUS\r
VirtioGpuResourceAttachBacking (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId,\r
- IN VOID *FirstBackingPage,\r
- IN UINTN NumberOfPages\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId,\r
+ IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,\r
+ IN UINTN NumberOfPages\r
)\r
{\r
volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
\r
Request.ResourceId = ResourceId;\r
Request.NrEntries = 1;\r
- Request.Entry.Addr = (UINTN)FirstBackingPage;\r
+ Request.Entry.Addr = BackingStoreDeviceAddress;\r
Request.Entry.Length = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);\r
Request.Entry.Padding = 0;\r
\r