Copyright (C) 2016, Red Hat, Inc.\r
Copyright (c) 2017, AMD Inc, All rights reserved.<BR>\r
\r
- This program and the accompanying materials are licensed and made available\r
- under the terms and conditions of the BSD License which accompanies this\r
- distribution. The full text of the license may be found at\r
- http://opensource.org/licenses/bsd-license.php\r
-\r
- THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT\r
- WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
**/\r
EFI_STATUS\r
VirtioGpuInit (\r
- IN OUT VGPU_DEV *VgpuDev\r
+ IN OUT VGPU_DEV *VgpuDev\r
)\r
{\r
- UINT8 NextDevStat;\r
- EFI_STATUS Status;\r
- UINT64 Features;\r
- UINT16 QueueSize;\r
- UINT64 RingBaseShift;\r
+ UINT8 NextDevStat;\r
+ EFI_STATUS Status;\r
+ UINT64 Features;\r
+ UINT16 QueueSize;\r
+ UINT64 RingBaseShift;\r
\r
//\r
// Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device\r
// 1. Reset the device.\r
//\r
NextDevStat = 0;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
// 2. Set the ACKNOWLEDGE status bit [...]\r
//\r
NextDevStat |= VSTAT_ACK;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
// 3. Set the DRIVER status bit [...]\r
//\r
NextDevStat |= VSTAT_DRIVER;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
if ((Features & VIRTIO_F_VERSION_1) == 0) {\r
Status = EFI_UNSUPPORTED;\r
goto Failed;\r
}\r
+\r
//\r
// We only want the most basic 2D features.\r
//\r
// 7. Perform device-specific setup, including discovery of virtqueues for\r
// the device [...]\r
//\r
- Status = VgpuDev->VirtIo->SetQueueSel (VgpuDev->VirtIo,\r
- VIRTIO_GPU_CONTROL_QUEUE);\r
+ Status = VgpuDev->VirtIo->SetQueueSel (\r
+ VgpuDev->VirtIo,\r
+ VIRTIO_GPU_CONTROL_QUEUE\r
+ );\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
Status = VgpuDev->VirtIo->GetQueueNumMax (VgpuDev->VirtIo, &QueueSize);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
+\r
//\r
// If anything fails from here on, we have to release the ring.\r
//\r
if (EFI_ERROR (Status)) {\r
goto ReleaseQueue;\r
}\r
+\r
//\r
// If anything fails from here on, we have to unmap the ring.\r
//\r
// 8. Set the DRIVER_OK status bit.\r
//\r
NextDevStat |= VSTAT_DRIVER_OK;\r
- Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
+ Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
goto UnmapQueue;\r
}\r
**/\r
VOID\r
VirtioGpuUninit (\r
- IN OUT VGPU_DEV *VgpuDev\r
+ IN OUT VGPU_DEV *VgpuDev\r
)\r
{\r
//\r
**/\r
EFI_STATUS\r
VirtioGpuAllocateZeroAndMapBackingStore (\r
- IN VGPU_DEV *VgpuDev,\r
- IN UINTN NumberOfPages,\r
- OUT VOID **HostAddress,\r
- OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,\r
- OUT VOID **Mapping\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ OUT VOID **HostAddress,\r
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,\r
+ OUT VOID **Mapping\r
)\r
{\r
- EFI_STATUS Status;\r
- VOID *NewHostAddress;\r
+ EFI_STATUS Status;\r
+ VOID *NewHostAddress;\r
\r
Status = VgpuDev->VirtIo->AllocateSharedPages (\r
VgpuDev->VirtIo,\r
**/\r
VOID\r
VirtioGpuUnmapAndFreeBackingStore (\r
- IN VGPU_DEV *VgpuDev,\r
- IN UINTN NumberOfPages,\r
- IN VOID *HostAddress,\r
- IN VOID *Mapping\r
+ IN VGPU_DEV *VgpuDev,\r
+ IN UINTN NumberOfPages,\r
+ IN VOID *HostAddress,\r
+ IN VOID *Mapping\r
)\r
{\r
VgpuDev->VirtIo->UnmapSharedBuffer (\r
VOID\r
EFIAPI\r
VirtioGpuExitBoot (\r
- IN EFI_EVENT Event,\r
- IN VOID *Context\r
+ IN EFI_EVENT Event,\r
+ IN VOID *Context\r
)\r
{\r
- VGPU_DEV *VgpuDev;\r
+ VGPU_DEV *VgpuDev;\r
\r
+ DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));\r
VgpuDev = Context;\r
VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);\r
-\r
- //\r
- // If VirtioGpuDriverBindingStart() and VirtioGpuDriverBindingStop() have\r
- // been called thus far in such a sequence that right now our (sole) child\r
- // handle exists -- with the GOP on it standing for head (scanout) #0 --,\r
- // then we have to unmap the current video mode's backing store.\r
- //\r
- if (VgpuDev->Child != NULL) {\r
- //\r
- // The current video mode is guaranteed to have a valid and mapped backing\r
- // store, due to the first Gop.SetMode() call, made internally in\r
- // InitVgpuGop().\r
- //\r
- ASSERT (VgpuDev->Child->BackingStore != NULL);\r
-\r
- VgpuDev->VirtIo->UnmapSharedBuffer (\r
- VgpuDev->VirtIo,\r
- VgpuDev->Child->BackingStoreMap\r
- );\r
- }\r
-\r
- VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);\r
}\r
\r
/**\r
@param[in] RequestSize Size of the entire caller-allocated request object,\r
including the leading VIRTIO_GPU_CONTROL_HEADER.\r
\r
+ @param[in] ResponseType The type of the response (VirtioGpuResp*).\r
+\r
+  @param[in,out] Response    Pointer to the caller-allocated response object. The
+                             response must start with VIRTIO_GPU_CONTROL_HEADER.
+\r
+ @param[in] ResponseSize Size of the entire caller-allocated response object,\r
+ including the leading VIRTIO_GPU_CONTROL_HEADER.\r
+\r
@retval EFI_SUCCESS Operation successful.\r
\r
@retval EFI_DEVICE_ERROR The host rejected the request. The host error\r
- code has been logged on the EFI_D_ERROR level.\r
+ code has been logged on the DEBUG_ERROR level.\r
\r
@return Codes for unexpected errors in VirtIo\r
messaging, or request/response\r
**/\r
STATIC\r
EFI_STATUS\r
-VirtioGpuSendCommand (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN VIRTIO_GPU_CONTROL_TYPE RequestType,\r
- IN BOOLEAN Fence,\r
- IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
- IN UINTN RequestSize\r
+VirtioGpuSendCommandWithReply (\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN VIRTIO_GPU_CONTROL_TYPE RequestType,\r
+ IN BOOLEAN Fence,\r
+ IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,\r
+ IN UINTN RequestSize,\r
+ IN VIRTIO_GPU_CONTROL_TYPE ResponseType,\r
+ IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Response,\r
+ IN UINTN ResponseSize\r
)\r
{\r
- DESC_INDICES Indices;\r
- volatile VIRTIO_GPU_CONTROL_HEADER Response;\r
- EFI_STATUS Status;\r
- UINT32 ResponseSize;\r
- EFI_PHYSICAL_ADDRESS RequestDeviceAddress;\r
- VOID *RequestMap;\r
- EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;\r
- VOID *ResponseMap;\r
+ DESC_INDICES Indices;\r
+ EFI_STATUS Status;\r
+ UINT32 ResponseSizeRet;\r
+ EFI_PHYSICAL_ADDRESS RequestDeviceAddress;\r
+ VOID *RequestMap;\r
+ EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;\r
+ VOID *ResponseMap;\r
\r
//\r
// Initialize Header.\r
//\r
- Header->Type = RequestType;\r
+ Header->Type = RequestType;\r
if (Fence) {\r
Header->Flags = VIRTIO_GPU_FLAG_FENCE;\r
Header->FenceId = VgpuDev->FenceId++;\r
Header->Flags = 0;\r
Header->FenceId = 0;\r
}\r
- Header->CtxId = 0;\r
- Header->Padding = 0;\r
+\r
+ Header->CtxId = 0;\r
+ Header->Padding = 0;\r
\r
ASSERT (RequestSize >= sizeof *Header);\r
ASSERT (RequestSize <= MAX_UINT32);\r
if (EFI_ERROR (Status)) {\r
return Status;\r
}\r
+\r
Status = VirtioMapAllBytesInSharedBuffer (\r
VgpuDev->VirtIo,\r
VirtioOperationBusMasterWrite,\r
- (VOID *)&Response,\r
- sizeof Response,\r
+ (VOID *)Response,\r
+ ResponseSize,\r
&ResponseDeviceAddress,\r
&ResponseMap\r
);\r
VirtioAppendDesc (\r
&VgpuDev->Ring,\r
ResponseDeviceAddress,\r
- (UINT32)sizeof Response,\r
+ (UINT32)ResponseSize,\r
VRING_DESC_F_WRITE,\r
&Indices\r
);\r
//\r
// Send the command.\r
//\r
- Status = VirtioFlush (VgpuDev->VirtIo, VIRTIO_GPU_CONTROL_QUEUE,\r
- &VgpuDev->Ring, &Indices, &ResponseSize);\r
+ Status = VirtioFlush (\r
+ VgpuDev->VirtIo,\r
+ VIRTIO_GPU_CONTROL_QUEUE,\r
+ &VgpuDev->Ring,\r
+ &Indices,\r
+ &ResponseSizeRet\r
+ );\r
if (EFI_ERROR (Status)) {\r
goto UnmapResponse;\r
}\r
//\r
// Verify response size.\r
//\r
- if (ResponseSize != sizeof Response) {\r
- DEBUG ((EFI_D_ERROR, "%a: malformed response to Request=0x%x\n",\r
- __FUNCTION__, (UINT32)RequestType));\r
+ if (ResponseSize != ResponseSizeRet) {\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "%a: malformed response to Request=0x%x\n",\r
+ __FUNCTION__,\r
+ (UINT32)RequestType\r
+ ));\r
Status = EFI_PROTOCOL_ERROR;\r
goto UnmapResponse;\r
}\r
if (EFI_ERROR (Status)) {\r
goto UnmapRequest;\r
}\r
+\r
Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);\r
if (EFI_ERROR (Status)) {\r
return Status;\r
//\r
// Parse the response.\r
//\r
- if (Response.Type == VirtioGpuRespOkNodata) {\r
+ if (Response->Type == (UINT32)ResponseType) {\r
return EFI_SUCCESS;\r
}\r
\r
- DEBUG ((EFI_D_ERROR, "%a: Request=0x%x Response=0x%x\n", __FUNCTION__,\r
- (UINT32)RequestType, Response.Type));\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "%a: Request=0x%x Response=0x%x (expected 0x%x)\n",\r
+ __FUNCTION__,\r
+ (UINT32)RequestType,\r
+ Response->Type,\r
+ ResponseType\r
+ ));\r
return EFI_DEVICE_ERROR;\r
\r
UnmapResponse:\r
return Status;\r
}\r
\r
+/**
+  Simplified version of VirtioGpuSendCommandWithReply() for commands
+  which do not send back any data.
+
+  The host is expected to answer with a bare VirtioGpuRespOkNodata header;
+  the response is received into a local buffer and discarded after
+  validation by VirtioGpuSendCommandWithReply().
+
+  @param[in,out] VgpuDev      The VGPU_DEV object that the request is sent to.
+
+  @param[in]     RequestType  The type of the request (VirtioGpuCmd*).
+
+  @param[in]     Fence        Whether to place a fence on the request.
+
+  @param[in,out] Header       Pointer to the caller-allocated request object,
+                              which must start with VIRTIO_GPU_CONTROL_HEADER.
+
+  @param[in]     RequestSize  Size of the entire caller-allocated request
+                              object, including the leading
+                              VIRTIO_GPU_CONTROL_HEADER.
+
+  @retval EFI_SUCCESS  Operation successful.
+
+  @return              Error codes propagated from
+                       VirtioGpuSendCommandWithReply().
+**/
+STATIC
+EFI_STATUS
+VirtioGpuSendCommand (
+  IN OUT VGPU_DEV                           *VgpuDev,
+  IN     VIRTIO_GPU_CONTROL_TYPE            RequestType,
+  IN     BOOLEAN                            Fence,
+  IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,
+  IN     UINTN                              RequestSize
+  )
+{
+  volatile VIRTIO_GPU_CONTROL_HEADER  Response;
+
+  return VirtioGpuSendCommandWithReply (
+           VgpuDev,
+           RequestType,
+           Fence,
+           Header,
+           RequestSize,
+           VirtioGpuRespOkNodata,
+           &Response,
+           sizeof (Response)
+           );
+}
+\r
/**\r
The following functions send requests to the VirtIo GPU device model, await\r
the answer from the host, and return a status. They share the following\r
@retval EFI_SUCCESS Operation successful.\r
\r
@retval EFI_DEVICE_ERROR The host rejected the request. The host error\r
- code has been logged on the EFI_D_ERROR level.\r
+ code has been logged on the DEBUG_ERROR level.\r
\r
@return Codes for unexpected errors in VirtIo\r
messaging.\r
**/\r
EFI_STATUS\r
VirtioGpuResourceCreate2d (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId,\r
- IN VIRTIO_GPU_FORMATS Format,\r
- IN UINT32 Width,\r
- IN UINT32 Height\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId,\r
+ IN VIRTIO_GPU_FORMATS Format,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;\r
+ volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceUnref (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_UNREF Request;\r
+ volatile VIRTIO_GPU_RESOURCE_UNREF Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceAttachBacking (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId,\r
- IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,\r
- IN UINTN NumberOfPages\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId,\r
+ IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,\r
+ IN UINTN NumberOfPages\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
+ volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceDetachBacking (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;\r
+ volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuSetScanout (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT32 ScanoutId,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT32 ScanoutId,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_SET_SCANOUT Request;\r
+ volatile VIRTIO_GPU_SET_SCANOUT Request;\r
\r
//\r
// Unlike for most other commands, ResourceId=0 is valid; it\r
\r
EFI_STATUS\r
VirtioGpuTransferToHost2d (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT64 Offset,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT64 Offset,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;\r
+ volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
\r
EFI_STATUS\r
VirtioGpuResourceFlush (\r
- IN OUT VGPU_DEV *VgpuDev,\r
- IN UINT32 X,\r
- IN UINT32 Y,\r
- IN UINT32 Width,\r
- IN UINT32 Height,\r
- IN UINT32 ResourceId\r
+ IN OUT VGPU_DEV *VgpuDev,\r
+ IN UINT32 X,\r
+ IN UINT32 Y,\r
+ IN UINT32 Width,\r
+ IN UINT32 Height,\r
+ IN UINT32 ResourceId\r
)\r
{\r
- volatile VIRTIO_GPU_RESOURCE_FLUSH Request;\r
+ volatile VIRTIO_GPU_RESOURCE_FLUSH Request;\r
\r
if (ResourceId == 0) {\r
return EFI_INVALID_PARAMETER;\r
sizeof Request\r
);\r
}\r
+\r
+/**
+  Query the host's display information with the
+  VIRTIO_GPU_CMD_GET_DISPLAY_INFO command.
+
+  @param[in,out] VgpuDev   The VGPU_DEV object that the request is sent to.
+
+  @param[out]    Response  Caller-allocated buffer that receives the
+                           VIRTIO_GPU_RESP_DISPLAY_INFO response. On success,
+                           the leading header has been verified to be of type
+                           VirtioGpuRespOkDisplayInfo.
+
+  @retval EFI_SUCCESS  Operation successful.
+
+  @return              Error codes propagated from
+                       VirtioGpuSendCommandWithReply().
+**/
+EFI_STATUS
+VirtioGpuGetDisplayInfo (
+  IN OUT VGPU_DEV                       *VgpuDev,
+  volatile VIRTIO_GPU_RESP_DISPLAY_INFO *Response
+  )
+{
+  volatile VIRTIO_GPU_CONTROL_HEADER  Request;
+
+  return VirtioGpuSendCommandWithReply (
+           VgpuDev,
+           VirtioGpuCmdGetDisplayInfo,
+           FALSE,                       // Fence
+           &Request,
+           sizeof Request,
+           VirtioGpuRespOkDisplayInfo,
+           &Response->Header,
+           sizeof *Response
+           );
+}