/** @file\r
\r
The protocol provides support to allocate, free, map and umap a DMA buffer\r
- for bus master (e.g PciHostBridge). When SEV is enabled, the DMA operations\r
- must be performed on unencrypted buffer hence we use a bounce buffer to map\r
- the guest buffer into an unencrypted DMA buffer.\r
+  for bus master (e.g. PciHostBridge). When SEV or TDX is enabled, the DMA
+  operations must be performed on an unencrypted buffer, hence we use a
+  bounce buffer to map the guest buffer into an unencrypted DMA buffer.
\r
Copyright (c) 2017, AMD Inc. All rights reserved.<BR>\r
Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>\r
\r
**/\r
\r
+#include <Library/PcdLib.h>\r
+#include <ConfidentialComputingGuestAttr.h>\r
#include "AmdSevIoMmu.h"\r
\r
#define MAP_INFO_SIG SIGNATURE_64 ('M', 'A', 'P', '_', 'I', 'N', 'F', 'O')\r
\r
/**\r
Provides the controller-specific addresses required to access system memory\r
- from a DMA bus master. On SEV guest, the DMA operations must be performed on\r
+  from a DMA bus master. On an SEV/TDX guest, DMA must be performed on a
shared buffer hence we allocate a bounce buffer to map the HostAddress to a\r
DeviceAddress. The Encryption attribute is removed from the DeviceAddress\r
buffer.\r
goto FreeMapInfo;\r
}\r
\r
- //\r
- // Clear the memory encryption mask on the plaintext buffer.\r
- //\r
- Status = MemEncryptSevClearPageEncMask (\r
- 0,\r
- MapInfo->PlainTextAddress,\r
- MapInfo->NumberOfPages\r
- );\r
+ if (CC_GUEST_IS_SEV (PcdGet64 (PcdConfidentialComputingGuestAttr))) {\r
+ //\r
+ // Clear the memory encryption mask on the plaintext buffer.\r
+ //\r
+ Status = MemEncryptSevClearPageEncMask (\r
+ 0,\r
+ MapInfo->PlainTextAddress,\r
+ MapInfo->NumberOfPages\r
+ );\r
+ } else if (CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr))) {\r
+ //\r
+ // Set the memory shared bit.\r
+ //\r
+ Status = MemEncryptTdxSetPageSharedBit (\r
+ 0,\r
+ MapInfo->PlainTextAddress,\r
+ MapInfo->NumberOfPages\r
+ );\r
+ } else {\r
+ ASSERT (FALSE);\r
+ }\r
+\r
ASSERT_EFI_ERROR (Status);\r
if (EFI_ERROR (Status)) {\r
CpuDeadLoop ();\r
}\r
\r
MapInfo = (MAP_INFO *)Mapping;\r
-\r
+ Status = EFI_SUCCESS;\r
//\r
// set CommonBufferHeader to suppress incorrect compiler/analyzer warnings\r
//\r
break;\r
}\r
\r
- //\r
- // Restore the memory encryption mask on the area we used to hold the\r
- // plaintext.\r
- //\r
- Status = MemEncryptSevSetPageEncMask (\r
- 0,\r
- MapInfo->PlainTextAddress,\r
- MapInfo->NumberOfPages\r
- );\r
+ if (CC_GUEST_IS_SEV (PcdGet64 (PcdConfidentialComputingGuestAttr))) {\r
+ //\r
+ // Restore the memory encryption mask on the area we used to hold the\r
+ // plaintext.\r
+ //\r
+ Status = MemEncryptSevSetPageEncMask (\r
+ 0,\r
+ MapInfo->PlainTextAddress,\r
+ MapInfo->NumberOfPages\r
+ );\r
+ } else if (CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr))) {\r
+ //\r
+ // Restore the memory shared bit mask on the area we used to hold the\r
+ // plaintext.\r
+ //\r
+ Status = MemEncryptTdxClearPageSharedBit (\r
+ 0,\r
+ MapInfo->PlainTextAddress,\r
+ MapInfo->NumberOfPages\r
+ );\r
+ } else {\r
+ ASSERT (FALSE);\r
+ }\r
+\r
ASSERT_EFI_ERROR (Status);\r
if (EFI_ERROR (Status)) {\r
CpuDeadLoop ();\r
return EFI_UNSUPPORTED;\r
}\r
\r
-EDKII_IOMMU_PROTOCOL mAmdSev = {\r
+EDKII_IOMMU_PROTOCOL mIoMmu = {\r
EDKII_IOMMU_PROTOCOL_REVISION,\r
IoMmuSetAttribute,\r
IoMmuMap,\r
STATIC\r
VOID\r
EFIAPI\r
-AmdSevExitBoot (\r
+IoMmuExitBoot (\r
IN EFI_EVENT Event,\r
IN VOID *EventToSignal\r
)\r
//\r
// (1) The NotifyFunctions of all the events in\r
// EFI_EVENT_GROUP_EXIT_BOOT_SERVICES will have been queued before\r
- // AmdSevExitBoot() is entered.\r
+ // IoMmuExitBoot() is entered.\r
//\r
- // (2) AmdSevExitBoot() is executing minimally at TPL_CALLBACK.\r
+ // (2) IoMmuExitBoot() is executing minimally at TPL_CALLBACK.\r
//\r
- // (3) AmdSevExitBoot() has been queued in unspecified order relative to the\r
+ // (3) IoMmuExitBoot() has been queued in unspecified order relative to the\r
// NotifyFunctions of all the other events in\r
// EFI_EVENT_GROUP_EXIT_BOOT_SERVICES whose NotifyTpl is the same as\r
// Event's.\r
// Consequences:\r
//\r
// - If Event's NotifyTpl is TPL_CALLBACK, then some other NotifyFunctions\r
- // queued at TPL_CALLBACK may be invoked after AmdSevExitBoot() returns.\r
+ // queued at TPL_CALLBACK may be invoked after IoMmuExitBoot() returns.\r
//\r
// - If Event's NotifyTpl is TPL_NOTIFY, then some other NotifyFunctions\r
- // queued at TPL_NOTIFY may be invoked after AmdSevExitBoot() returns; plus\r
+ // queued at TPL_NOTIFY may be invoked after IoMmuExitBoot() returns; plus\r
// *all* NotifyFunctions queued at TPL_CALLBACK will be invoked strictly\r
// after all NotifyFunctions queued at TPL_NOTIFY, including\r
- // AmdSevExitBoot(), have been invoked.\r
+ // IoMmuExitBoot(), have been invoked.\r
//\r
// - By signaling EventToSignal here, whose NotifyTpl is TPL_CALLBACK, we\r
// queue EventToSignal's NotifyFunction after the NotifyFunctions of *all*\r
STATIC\r
VOID\r
EFIAPI\r
-AmdSevUnmapAllMappings (\r
+IoMmuUnmapAllMappings (\r
IN EFI_EVENT Event,\r
IN VOID *Context\r
)\r
NextNode = GetNextNode (&mMapInfos, Node);\r
MapInfo = CR (Node, MAP_INFO, Link, MAP_INFO_SIG);\r
IoMmuUnmapWorker (\r
- &mAmdSev, // This\r
+ &mIoMmu, // This\r
MapInfo, // Mapping\r
TRUE // MemoryMapLocked\r
);\r
**/\r
EFI_STATUS\r
EFIAPI\r
-AmdSevInstallIoMmuProtocol (\r
+InstallIoMmuProtocol (\r
VOID\r
)\r
{\r
Status = gBS->CreateEvent (\r
EVT_NOTIFY_SIGNAL, // Type\r
TPL_CALLBACK, // NotifyTpl\r
- AmdSevUnmapAllMappings, // NotifyFunction\r
+ IoMmuUnmapAllMappings, // NotifyFunction\r
NULL, // NotifyContext\r
&UnmapAllMappingsEvent // Event\r
);\r
Status = gBS->CreateEvent (\r
EVT_SIGNAL_EXIT_BOOT_SERVICES, // Type\r
TPL_CALLBACK, // NotifyTpl\r
- AmdSevExitBoot, // NotifyFunction\r
+ IoMmuExitBoot, // NotifyFunction\r
UnmapAllMappingsEvent, // NotifyContext\r
&ExitBootEvent // Event\r
);\r
Status = gBS->InstallMultipleProtocolInterfaces (\r
&Handle,\r
&gEdkiiIoMmuProtocolGuid,\r
- &mAmdSev,\r
+ &mIoMmu,\r
NULL\r
);\r
if (EFI_ERROR (Status)) {\r