synchronous requests and EFI_BLOCK_IO_PROTOCOL for now.\r
\r
Copyright (C) 2012, Red Hat, Inc.\r
- Copyright (c) 2012, Intel Corporation. All rights reserved.<BR>\r
+ Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>\r
+ Copyright (c) 2017, AMD Inc, All rights reserved.<BR>\r
\r
- This program and the accompanying materials are licensed and made available\r
- under the terms and conditions of the BSD License which accompanies this\r
- distribution. The full text of the license may be found at\r
- http://opensource.org/licenses/bsd-license.php\r
-\r
- THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT\r
- WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
\r
@retval EFI_DEVICE_ERROR Failed to notify host side via VirtIo write, or\r
unable to parse host response, or host response\r
- is not VIRTIO_BLK_S_OK.\r
+ is not VIRTIO_BLK_S_OK or failed to map Buffer\r
+ for a bus master operation.\r
\r
**/\r
\r
{\r
UINT32 BlockSize;\r
volatile VIRTIO_BLK_REQ Request;\r
- volatile UINT8 HostStatus;\r
+ volatile UINT8 *HostStatus;\r
+ VOID *HostStatusBuffer;\r
DESC_INDICES Indices;\r
+ VOID *RequestMapping;\r
+ VOID *StatusMapping;\r
+ VOID *BufferMapping;\r
+ EFI_PHYSICAL_ADDRESS BufferDeviceAddress;\r
+ EFI_PHYSICAL_ADDRESS HostStatusDeviceAddress;\r
+ EFI_PHYSICAL_ADDRESS RequestDeviceAddress;\r
+ EFI_STATUS Status;\r
+ EFI_STATUS UnmapStatus;\r
\r
BlockSize = Dev->BlockIoMedia.BlockSize;\r
\r
+ //\r
+ // Set BufferMapping and BufferDeviceAddress to suppress incorrect\r
+ // compiler/analyzer warnings.\r
+ //\r
+ BufferMapping = NULL;\r
+ BufferDeviceAddress = 0;\r
+\r
//\r
// ensured by VirtioBlkInit()\r
//\r
Request.IoPrio = 0;\r
Request.Sector = MultU64x32(Lba, BlockSize / 512);\r
\r
- VirtioPrepare (&Dev->Ring, &Indices);\r
+ //\r
+ // Host status is bi-directional (we preset it with a value and expect the\r
+ // device to update it). Allocate a host status buffer that can be mapped\r
+ // for shared access by both the processor and the device.\r
+ //\r
+ Status = Dev->VirtIo->AllocateSharedPages (\r
+ Dev->VirtIo,\r
+ EFI_SIZE_TO_PAGES (sizeof *HostStatus),\r
+ &HostStatusBuffer\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return EFI_DEVICE_ERROR;\r
+ }\r
+\r
+ HostStatus = HostStatusBuffer;\r
+\r
+ //\r
+ // Map virtio-blk request header (must be done after request header is\r
+ // populated)\r
+ //\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ Dev->VirtIo,\r
+ VirtioOperationBusMasterRead,\r
+ (VOID *) &Request,\r
+ sizeof Request,\r
+ &RequestDeviceAddress,\r
+ &RequestMapping\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ Status = EFI_DEVICE_ERROR;\r
+ goto FreeHostStatusBuffer;\r
+ }\r
+\r
+ //\r
+ // Map data buffer\r
+ //\r
+ if (BufferSize > 0) {\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ Dev->VirtIo,\r
+ (RequestIsWrite ?\r
+ VirtioOperationBusMasterRead :\r
+ VirtioOperationBusMasterWrite),\r
+ (VOID *) Buffer,\r
+ BufferSize,\r
+ &BufferDeviceAddress,\r
+ &BufferMapping\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ Status = EFI_DEVICE_ERROR;\r
+ goto UnmapRequestBuffer;\r
+ }\r
+ }\r
\r
//\r
// preset a host status for ourselves that we do not accept as success\r
//\r
- HostStatus = VIRTIO_BLK_S_IOERR;\r
+ *HostStatus = VIRTIO_BLK_S_IOERR;\r
+\r
+ //\r
+ // Map the Status Buffer with VirtioOperationBusMasterCommonBuffer so that\r
+ // both processor and device can access it.\r
+ //\r
+ Status = VirtioMapAllBytesInSharedBuffer (\r
+ Dev->VirtIo,\r
+ VirtioOperationBusMasterCommonBuffer,\r
+ HostStatusBuffer,\r
+ sizeof *HostStatus,\r
+ &HostStatusDeviceAddress,\r
+ &StatusMapping\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ Status = EFI_DEVICE_ERROR;\r
+ goto UnmapDataBuffer;\r
+ }\r
+\r
+ VirtioPrepare (&Dev->Ring, &Indices);\r
\r
//\r
// ensured by VirtioBlkInit() -- this predicate, in combination with the\r
//\r
// virtio-blk header in first desc\r
//\r
- VirtioAppendDesc (&Dev->Ring, (UINTN) &Request, sizeof Request,\r
- VRING_DESC_F_NEXT, &Indices);\r
+ VirtioAppendDesc (\r
+ &Dev->Ring,\r
+ RequestDeviceAddress,\r
+ sizeof Request,\r
+ VRING_DESC_F_NEXT,\r
+ &Indices\r
+ );\r
\r
//\r
// data buffer for read/write in second desc\r
//\r
// VRING_DESC_F_WRITE is interpreted from the host's point of view.\r
//\r
- VirtioAppendDesc (&Dev->Ring, (UINTN) Buffer, (UINT32) BufferSize,\r
+ VirtioAppendDesc (\r
+ &Dev->Ring,\r
+ BufferDeviceAddress,\r
+ (UINT32) BufferSize,\r
VRING_DESC_F_NEXT | (RequestIsWrite ? 0 : VRING_DESC_F_WRITE),\r
- &Indices);\r
+ &Indices\r
+ );\r
}\r
\r
//\r
// host status in last (second or third) desc\r
//\r
- VirtioAppendDesc (&Dev->Ring, (UINTN) &HostStatus, sizeof HostStatus,\r
- VRING_DESC_F_WRITE, &Indices);\r
+ VirtioAppendDesc (\r
+ &Dev->Ring,\r
+ HostStatusDeviceAddress,\r
+ sizeof *HostStatus,\r
+ VRING_DESC_F_WRITE,\r
+ &Indices\r
+ );\r
\r
//\r
// virtio-blk's only virtqueue is #0, called "requestq" (see Appendix D).\r
//\r
- if (VirtioFlush (Dev->VirtIo, 0, &Dev->Ring, &Indices) == EFI_SUCCESS &&\r
- HostStatus == VIRTIO_BLK_S_OK) {\r
- return EFI_SUCCESS;\r
+ if (VirtioFlush (Dev->VirtIo, 0, &Dev->Ring, &Indices,\r
+ NULL) == EFI_SUCCESS &&\r
+ *HostStatus == VIRTIO_BLK_S_OK) {\r
+ Status = EFI_SUCCESS;\r
+ } else {\r
+ Status = EFI_DEVICE_ERROR;\r
+ }\r
+\r
+ Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, StatusMapping);\r
+\r
+UnmapDataBuffer:\r
+ if (BufferSize > 0) {\r
+ UnmapStatus = Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, BufferMapping);\r
+ if (EFI_ERROR (UnmapStatus) && !RequestIsWrite && !EFI_ERROR (Status)) {\r
+ //\r
+ // Data from the bus master may not reach the caller; fail the request.\r
+ //\r
+ Status = EFI_DEVICE_ERROR;\r
+ }\r
}\r
\r
- return EFI_DEVICE_ERROR;\r
+UnmapRequestBuffer:\r
+ Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, RequestMapping);\r
+\r
+FreeHostStatusBuffer:\r
+ Dev->VirtIo->FreeSharedPages (\r
+ Dev->VirtIo,\r
+ EFI_SIZE_TO_PAGES (sizeof *HostStatus),\r
+ HostStatusBuffer\r
+ );\r
+\r
+ return Status;\r
}\r
\r
\r
virtio-blk attributes the host provides.\r
\r
@return Error codes from VirtioRingInit() or\r
- VIRTIO_CFG_READ() / VIRTIO_CFG_WRITE().\r
+ VIRTIO_CFG_READ() / VIRTIO_CFG_WRITE(), or\r
+ VirtioRingMap().\r
\r
**/\r
\r
UINT8 NextDevStat;\r
EFI_STATUS Status;\r
\r
- UINT32 Features;\r
+ UINT64 Features;\r
UINT64 NumSectors;\r
UINT32 BlockSize;\r
UINT8 PhysicalBlockExp;\r
UINT8 AlignmentOffset;\r
UINT32 OptIoSize;\r
UINT16 QueueSize;\r
+ UINT64 RingBaseShift;\r
\r
PhysicalBlockExp = 0;\r
AlignmentOffset = 0;\r
}\r
}\r
\r
+ Features &= VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_TOPOLOGY | VIRTIO_BLK_F_RO |\r
+ VIRTIO_BLK_F_FLUSH | VIRTIO_F_VERSION_1 |\r
+ VIRTIO_F_IOMMU_PLATFORM;\r
+\r
+ //\r
+ // In virtio-1.0, feature negotiation is expected to complete before queue\r
+ // discovery, and the device can also reject the selected set of features.\r
+ //\r
+ if (Dev->VirtIo->Revision >= VIRTIO_SPEC_REVISION (1, 0, 0)) {\r
+ Status = Virtio10WriteFeatures (Dev->VirtIo, Features, &NextDevStat);\r
+ if (EFI_ERROR (Status)) {\r
+ goto Failed;\r
+ }\r
+ }\r
+\r
//\r
// step 4b -- allocate virtqueue\r
//\r
goto Failed;\r
}\r
\r
- Status = VirtioRingInit (QueueSize, &Dev->Ring);\r
+ Status = VirtioRingInit (Dev->VirtIo, QueueSize, &Dev->Ring);\r
if (EFI_ERROR (Status)) {\r
goto Failed;\r
}\r
\r
+ //\r
+ // If anything fails from here on, we must release the ring resources\r
+ //\r
+ Status = VirtioRingMap (\r
+ Dev->VirtIo,\r
+ &Dev->Ring,\r
+ &RingBaseShift,\r
+ &Dev->RingMap\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ goto ReleaseQueue;\r
+ }\r
+\r
//\r
// Additional steps for MMIO: align the queue appropriately, and set the\r
- // size. If anything fails from here on, we must release the ring resources.\r
+ // size. If anything fails from here on, we must unmap the ring resources.\r
//\r
Status = Dev->VirtIo->SetQueueNum (Dev->VirtIo, QueueSize);\r
if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ goto UnmapQueue;\r
}\r
\r
Status = Dev->VirtIo->SetQueueAlign (Dev->VirtIo, EFI_PAGE_SIZE);\r
if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ goto UnmapQueue;\r
}\r
\r
//\r
// step 4c -- Report GPFN (guest-physical frame number) of queue.\r
//\r
- Status = Dev->VirtIo->SetQueueAddress (Dev->VirtIo,\r
- (UINT32) ((UINTN) Dev->Ring.Base >> EFI_PAGE_SHIFT));\r
+ Status = Dev->VirtIo->SetQueueAddress (\r
+ Dev->VirtIo,\r
+ &Dev->Ring,\r
+ RingBaseShift\r
+ );\r
if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ goto UnmapQueue;\r
}\r
\r
\r
//\r
- // step 5 -- Report understood features. There are no virtio-blk specific\r
- // features to negotiate in virtio-0.9.5, plus we do not want any of the\r
- // device-independent (known or unknown) VIRTIO_F_* capabilities (see\r
- // Appendix B).\r
+ // step 5 -- Report understood features.\r
//\r
- Status = Dev->VirtIo->SetGuestFeatures (Dev->VirtIo, 0);\r
- if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ if (Dev->VirtIo->Revision < VIRTIO_SPEC_REVISION (1, 0, 0)) {\r
+ Features &= ~(UINT64)(VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM);\r
+ Status = Dev->VirtIo->SetGuestFeatures (Dev->VirtIo, Features);\r
+ if (EFI_ERROR (Status)) {\r
+ goto UnmapQueue;\r
+ }\r
}\r
\r
//\r
NextDevStat |= VSTAT_DRIVER_OK;\r
Status = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);\r
if (EFI_ERROR (Status)) {\r
- goto ReleaseQueue;\r
+ goto UnmapQueue;\r
}\r
\r
//\r
Dev->BlockIoMedia.RemovableMedia = FALSE;\r
Dev->BlockIoMedia.MediaPresent = TRUE;\r
Dev->BlockIoMedia.LogicalPartition = FALSE;\r
- Dev->BlockIoMedia.ReadOnly = !!(Features & VIRTIO_BLK_F_RO);\r
- Dev->BlockIoMedia.WriteCaching = !!(Features & VIRTIO_BLK_F_FLUSH);\r
+ Dev->BlockIoMedia.ReadOnly = (BOOLEAN) ((Features & VIRTIO_BLK_F_RO) != 0);\r
+ Dev->BlockIoMedia.WriteCaching = (BOOLEAN) ((Features & VIRTIO_BLK_F_FLUSH) != 0);\r
Dev->BlockIoMedia.BlockSize = BlockSize;\r
Dev->BlockIoMedia.IoAlign = 0;\r
Dev->BlockIoMedia.LastBlock = DivU64x32 (NumSectors,\r
BlockSize / 512) - 1;\r
\r
+ DEBUG ((DEBUG_INFO, "%a: LbaSize=0x%x[B] NumBlocks=0x%Lx[Lba]\n",\r
+ __FUNCTION__, Dev->BlockIoMedia.BlockSize,\r
+ Dev->BlockIoMedia.LastBlock + 1));\r
+\r
if (Features & VIRTIO_BLK_F_TOPOLOGY) {\r
Dev->BlockIo.Revision = EFI_BLOCK_IO_PROTOCOL_REVISION3;\r
\r
Dev->BlockIoMedia.LowestAlignedLba = AlignmentOffset;\r
Dev->BlockIoMedia.LogicalBlocksPerPhysicalBlock = 1u << PhysicalBlockExp;\r
Dev->BlockIoMedia.OptimalTransferLengthGranularity = OptIoSize;\r
+\r
+ DEBUG ((DEBUG_INFO, "%a: FirstAligned=0x%Lx[Lba] PhysBlkSize=0x%x[Lba]\n",\r
+ __FUNCTION__, Dev->BlockIoMedia.LowestAlignedLba,\r
+ Dev->BlockIoMedia.LogicalBlocksPerPhysicalBlock));\r
+ DEBUG ((DEBUG_INFO, "%a: OptimalTransferLengthGranularity=0x%x[Lba]\n",\r
+ __FUNCTION__, Dev->BlockIoMedia.OptimalTransferLengthGranularity));\r
}\r
return EFI_SUCCESS;\r
\r
+UnmapQueue:\r
+ Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, Dev->RingMap);\r
+\r
ReleaseQueue:\r
- VirtioRingUninit (&Dev->Ring);\r
+ VirtioRingUninit (Dev->VirtIo, &Dev->Ring);\r
\r
Failed:\r
//\r
//\r
Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, 0);\r
\r
- VirtioRingUninit (&Dev->Ring);\r
+ Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, Dev->RingMap);\r
+ VirtioRingUninit (Dev->VirtIo, &Dev->Ring);\r
\r
SetMem (&Dev->BlockIo, sizeof Dev->BlockIo, 0x00);\r
SetMem (&Dev->BlockIoMedia, sizeof Dev->BlockIoMedia, 0x00);\r
}\r
\r
\r
+/**\r
+\r
+ Event notification function enqueued by ExitBootServices().\r
+\r
+ @param[in] Event Event whose notification function is being invoked.\r
+\r
+ @param[in] Context Pointer to the VBLK_DEV structure.\r
+\r
+**/\r
+\r
+STATIC\r
+VOID\r
+EFIAPI\r
+VirtioBlkExitBoot (\r
+ IN EFI_EVENT Event,\r
+ IN VOID *Context\r
+ )\r
+{\r
+ VBLK_DEV *Dev;\r
+\r
+ DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));\r
+ //\r
+ // Reset the device. This causes the hypervisor to forget about the virtio\r
+ // ring.\r
+ //\r
+ // We allocated said ring in EfiBootServicesData type memory, and code\r
+ // executing after ExitBootServices() is permitted to overwrite it.\r
+ //\r
+ Dev = Context;\r
+ Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, 0);\r
+}\r
+\r
/**\r
\r
After we've pronounced support for a specific device in\r
DriverBindingSupported(), we start managing said device (passed in by the\r
- Driver Exeuction Environment) with the following service.\r
+ Driver Execution Environment) with the following service.\r
\r
See DriverBindingSupported() for specification references.\r
\r
\r
@retval EFI_SUCCESS Driver instance has been created and\r
initialized for the virtio-blk device, it\r
- is now accessibla via EFI_BLOCK_IO_PROTOCOL.\r
+ is now accessible via EFI_BLOCK_IO_PROTOCOL.\r
\r
@retval EFI_OUT_OF_RESOURCES Memory allocation failed.\r
\r
goto CloseVirtIo;\r
}\r
\r
+ Status = gBS->CreateEvent (EVT_SIGNAL_EXIT_BOOT_SERVICES, TPL_CALLBACK,\r
+ &VirtioBlkExitBoot, Dev, &Dev->ExitBoot);\r
+ if (EFI_ERROR (Status)) {\r
+ goto UninitDev;\r
+ }\r
+\r
//\r
// Setup complete, attempt to export the driver instance's BlockIo interface.\r
//\r
&gEfiBlockIoProtocolGuid, EFI_NATIVE_INTERFACE,\r
&Dev->BlockIo);\r
if (EFI_ERROR (Status)) {\r
- goto UninitDev;\r
+ goto CloseExitBoot;\r
}\r
\r
return EFI_SUCCESS;\r
\r
+CloseExitBoot:\r
+ gBS->CloseEvent (Dev->ExitBoot);\r
+\r
UninitDev:\r
VirtioBlkUninit (Dev);\r
\r
return Status;\r
}\r
\r
+ gBS->CloseEvent (Dev->ExitBoot);\r
+\r
VirtioBlkUninit (Dev);\r
\r
gBS->CloseProtocol (DeviceHandle, &gVirtioDeviceProtocolGuid,\r