From: Brijesh Singh via groups.io
Date: Thu, 9 Dec 2021 03:27:42 +0000 (+0800)
Subject: OvmfPkg/MemEncryptSevLib: add support to validate system RAM
X-Git-Tag: edk2-stable202202~183
X-Git-Url: https://git.proxmox.com/?p=mirror_edk2.git;a=commitdiff_plain;h=ade62c18f4742301bbef474ac10518bde5972fba

OvmfPkg/MemEncryptSevLib: add support to validate system RAM

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Many of the integrity guarantees of SEV-SNP are enforced through the
Reverse Map Table (RMP). Each RMP entry contains the GPA at which a
particular page of DRAM should be mapped. The guest can request the
hypervisor to add pages to the RMP table via the Page State Change
VMGEXIT defined in the GHCB specification sections 2.5.1 and 4.1.6.
Inside each RMP entry is a Validated flag; this flag is automatically
cleared to 0 by the CPU hardware when a new RMP entry is created for a
guest. Each VM page can be either validated or invalidated, as
indicated by the Validated flag in the RMP entry. A memory access to a
private page that is not validated generates a #VC exception. A VM can
use the PVALIDATE instruction to validate a private page before using
it.

During guest creation, the boot ROM memory is pre-validated by the AMD
SEV firmware. MemEncryptSevSnpPreValidateSystemRam() can be called
during the SEC and PEI phases to validate the detected system RAM.

One of the fields in the Page State Change NAE event is the RMP page
size. The page size field indicates whether a 4KB or a 2MB page should
be used while adding the RMP entry. During the validation,
MemEncryptSevSnpPreValidateSystemRam() will use the 2MB entry whenever
possible. A hypervisor backing the memory may choose to use a
different page size in the RMP entry; in that case, the PVALIDATE
instruction returns SIZEMISMATCH. If a SIZEMISMATCH is detected, then
all 512 4KB pages constituting the 2MB region are validated
individually.

Upon completion, the PVALIDATE instruction sets rFLAGS.CF to 0 if the
instruction changed the RMP entry and to 1 if it did not. rFLAGS.CF
will be 1 only when a memory region is already validated. We should
not double-validate memory, as that could lead to a security
compromise. If double validation is detected, terminate the boot.

Cc: Michael Roth
Cc: James Bottomley
Cc: Min Xu
Cc: Jiewen Yao
Cc: Tom Lendacky
Cc: Jordan Justen
Cc: Ard Biesheuvel
Cc: Erdem Aktas
Cc: Gerd Hoffmann
Acked-by: Jiewen Yao
Acked-by: Gerd Hoffmann
Signed-off-by: Brijesh Singh
---
diff --git a/OvmfPkg/Include/Library/MemEncryptSevLib.h b/OvmfPkg/Include/Library/MemEncryptSevLib.h
index 3c77d71df7..4fa9c0d700 100644
--- a/OvmfPkg/Include/Library/MemEncryptSevLib.h
+++ b/OvmfPkg/Include/Library/MemEncryptSevLib.h
@@ -214,4 +214,18 @@ MemEncryptSevClearMmioPageEncMask (
   IN UINTN                   NumPages
   );
 
+/**
+  Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+  @param[in] BaseAddress   Base address
+  @param[in] NumPages      Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN             NumPages
+  );
+
 #endif // _MEM_ENCRYPT_SEV_LIB_H_
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
index f2e162d680..f613bb314f 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
@@ -34,8 +34,10 @@
   PeiDxeMemEncryptSevLibInternal.c
 
 [Sources.X64]
+  X64/DxeSnpSystemRamValidate.c
   X64/MemEncryptSevLib.c
   X64/PeiDxeVirtualMemory.c
+  X64/SnpPageStateChangeInternal.c
   X64/VirtualMemory.c
   X64/VirtualMemory.h
 
@@ -49,6 +51,7 @@
   DebugLib
   MemoryAllocationLib
   PcdLib
+  VmgExitLib
 
 [FeaturePcd]
   gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c b/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
index 9bd66301fc..f92299fc77 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
@@ -136,3 +136,20 @@ MemEncryptSevClearMmioPageEncMask (
   //
   return RETURN_UNSUPPORTED;
 }
+
+/**
+  Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+  @param[in] BaseAddress   Base address
+  @param[in] NumPages      Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN             NumPages
+  )
+{
+  ASSERT (FALSE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
index 03a78c32df..0402e49a10 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
@@ -36,6 +36,8 @@
 [Sources.X64]
   X64/MemEncryptSevLib.c
   X64/PeiDxeVirtualMemory.c
+  X64/PeiSnpSystemRamValidate.c
+  X64/SnpPageStateChangeInternal.c
   X64/VirtualMemory.c
   X64/VirtualMemory.h
 
@@ -49,6 +51,7 @@
   DebugLib
   MemoryAllocationLib
   PcdLib
+  VmgExitLib
 
 [FeaturePcd]
   gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
index 279c38bfbc..939af0a91e 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
@@ -35,6 +35,8 @@
 [Sources.X64]
   X64/MemEncryptSevLib.c
   X64/SecVirtualMemory.c
+  X64/SecSnpSystemRamValidate.c
+  X64/SnpPageStateChangeInternal.c
   X64/VirtualMemory.c
   X64/VirtualMemory.h
 
@@ -46,6 +48,7 @@
   CpuLib
   DebugLib
   PcdLib
+  VmgExitLib
 
 [FixedPcd]
   gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c
new file mode 100644
index 0000000000..d3a95e4913
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c
@@ -0,0 +1,40 @@
+/** @file
+
+  SEV-SNP Page Validation functions.
+
+  Copyright (c) 2021 AMD Incorporated. All rights reserved.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+  Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+  @param[in] BaseAddress   Base address
+  @param[in] NumPages      Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN             NumPages
+  )
+{
+  if (!MemEncryptSevSnpIsEnabled ()) {
+    return;
+  }
+
+  //
+  // All the pre-validation must be completed in the PEI phase.
+  //
+  ASSERT (FALSE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
new file mode 100644
index 0000000000..bc891c2636
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
@@ -0,0 +1,36 @@
+/** @file
+
+  SEV-SNP Page Validation functions.
+
+  Copyright (c) 2021 AMD Incorporated. All rights reserved.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+  Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+  @param[in] BaseAddress   Base address
+  @param[in] NumPages      Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN             NumPages
+  )
+{
+  if (!MemEncryptSevSnpIsEnabled ()) {
+    return;
+  }
+
+  InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
new file mode 100644
index 0000000000..bc891c2636
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
@@ -0,0 +1,36 @@
+/** @file
+
+  SEV-SNP Page Validation functions.
+
+  Copyright (c) 2021 AMD Incorporated. All rights reserved.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+  Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+  @param[in] BaseAddress   Base address
+  @param[in] NumPages      Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN             NumPages
+  )
+{
+  if (!MemEncryptSevSnpIsEnabled ()) {
+    return;
+  }
+
+  InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
new file mode 100644
index 0000000000..b396f0ffbd
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
@@ -0,0 +1,30 @@
+/** @file
+
+  SEV-SNP Page Validation functions.
+
+  Copyright (c) 2021 AMD Incorporated. All rights reserved.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef SNP_PAGE_STATE_INTERNAL_H_
+#define SNP_PAGE_STATE_INTERNAL_H_
+
+//
+// SEV-SNP Page states
+//
+typedef enum {
+  SevSnpPagePrivate,
+  SevSnpPageShared,
+} SEV_SNP_PAGE_STATE;
+
+VOID
+InternalSetPageState (
+  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN                 NumPages,
+  IN SEV_SNP_PAGE_STATE    State,
+  IN BOOLEAN               UseLargeEntry
+  );
+
+#endif
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
new file mode 100644
index 0000000000..9c552ef5c7
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
@@ -0,0 +1,301 @@
+/** @file
+
+  SEV-SNP Page Validation functions.
+
+  Copyright (c) 2021 AMD Incorporated. All rights reserved.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/VmgExitLib.h>
+
+#include <Register/Amd/Ghcb.h>
+#include <Register/Amd/Msr.h>
+
+#include "SnpPageStateChange.h"
+
+#define IS_ALIGNED(x, y)       ((((x) & ((y) - 1)) == 0))
+#define PAGES_PER_LARGE_ENTRY  512
+
+STATIC
+UINTN
+MemoryStateToGhcbOp (
+  IN SEV_SNP_PAGE_STATE  State
+  )
+{
+  UINTN  Cmd;
+
+  switch (State) {
+    case SevSnpPageShared: Cmd = SNP_PAGE_STATE_SHARED;
+      break;
+    case SevSnpPagePrivate: Cmd = SNP_PAGE_STATE_PRIVATE;
+      break;
+    default: ASSERT (0);
+  }
+
+  return Cmd;
+}
+
+STATIC
+VOID
+SnpPageStateFailureTerminate (
+  VOID
+  )
+{
+  MSR_SEV_ES_GHCB_REGISTER  Msr;
+
+  //
+  // Use the GHCB MSR Protocol to request termination by the hypervisor
+  //
+  Msr.GhcbPhysicalAddress         = 0;
+  Msr.GhcbTerminate.Function      = GHCB_INFO_TERMINATE_REQUEST;
+  Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
+  Msr.GhcbTerminate.ReasonCode    = GHCB_TERMINATE_GHCB_GENERAL;
+  AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+  AsmVmgExit ();
+
+  ASSERT (FALSE);
+  CpuDeadLoop ();
+}
+
+/**
+  This function issues the PVALIDATE instruction to validate or invalidate the
+  memory range specified. If PVALIDATE returns a size mismatch, then it retries
+  the validation with a smaller page size.
+
+ */
+STATIC
+VOID
+PvalidateRange (
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info,
+  IN UINTN                       StartIndex,
+  IN UINTN                       EndIndex,
+  IN BOOLEAN                     Validate
+  )
+{
+  UINTN  Address, RmpPageSize, Ret, i;
+
+  for ( ; StartIndex <= EndIndex; StartIndex++) {
+    //
+    // Get the address and the page size from the Info.
+    //
+    Address     = Info->Entry[StartIndex].GuestFrameNumber << EFI_PAGE_SHIFT;
+    RmpPageSize = Info->Entry[StartIndex].PageSize;
+
+    Ret = AsmPvalidate (RmpPageSize, Validate, Address);
+
+    //
+    // If we fail to validate due to a size mismatch then try with the
+    // smaller page size. This scenario will occur if the backing page in
+    // the RMP entry is 4K and we are validating it as a 2MB page.
+    //
+    if ((Ret == PVALIDATE_RET_SIZE_MISMATCH) && (RmpPageSize == PvalidatePageSize2MB)) {
+      for (i = 0; i < PAGES_PER_LARGE_ENTRY; i++) {
+        Ret = AsmPvalidate (PvalidatePageSize4K, Validate, Address);
+        if (Ret) {
+          break;
+        }
+
+        Address = Address + EFI_PAGE_SIZE;
+      }
+    }
+
+    //
+    // If validation failed then do not continue.
+    //
+    if (Ret) {
+      DEBUG ((
+        DEBUG_ERROR,
+        "%a:%a: Failed to %a address 0x%Lx Error code %d\n",
+        gEfiCallerBaseName,
+        __FUNCTION__,
+        Validate ? "Validate" : "Invalidate",
+        Address,
+        Ret
+        ));
+      SnpPageStateFailureTerminate ();
+    }
+  }
+}
+
+STATIC
+EFI_PHYSICAL_ADDRESS
+BuildPageStateBuffer (
+  IN EFI_PHYSICAL_ADDRESS        BaseAddress,
+  IN EFI_PHYSICAL_ADDRESS        EndAddress,
+  IN SEV_SNP_PAGE_STATE          State,
+  IN BOOLEAN                     UseLargeEntry,
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  EFI_PHYSICAL_ADDRESS  NextAddress;
+  UINTN                 i, RmpPageSize;
+
+  // Clear the page state structure
+  SetMem (Info, sizeof (*Info), 0);
+
+  i           = 0;
+  NextAddress = EndAddress;
+
+  //
+  // Populate the page state entry structure
+  //
+  while ((BaseAddress < EndAddress) && (i < SNP_PAGE_STATE_MAX_ENTRY)) {
+    //
+    // Is this a 2MB aligned page? Check if we can use the Large RMP entry.
+    //
+    if (UseLargeEntry && IS_ALIGNED (BaseAddress, SIZE_2MB) &&
+        ((EndAddress - BaseAddress) >= SIZE_2MB))
+    {
+      RmpPageSize = PvalidatePageSize2MB;
+      NextAddress = BaseAddress + SIZE_2MB;
+    } else {
+      RmpPageSize = PvalidatePageSize4K;
+      NextAddress = BaseAddress + EFI_PAGE_SIZE;
+    }
+
+    Info->Entry[i].GuestFrameNumber = BaseAddress >> EFI_PAGE_SHIFT;
+    Info->Entry[i].PageSize         = RmpPageSize;
+    Info->Entry[i].Operation        = MemoryStateToGhcbOp (State);
+    Info->Entry[i].CurrentPage      = 0;
+    Info->Header.EndEntry           = (UINT16)i;
+
+    BaseAddress = NextAddress;
+    i++;
+  }
+
+  return NextAddress;
+}
+
+STATIC
+VOID
+PageStateChangeVmgExit (
+  IN GHCB                        *Ghcb,
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  EFI_STATUS  Status;
+
+  //
+  // As per the GHCB specification, the hypervisor can resume the guest before
+  // processing all the entries. Check whether all the entries have been
+  // processed.
+  //
+  // The strategy here is to wait for the hypervisor to change the page
+  // state in the RMP table before the guest accesses the memory pages. If
+  // the page state change was not successful, then a later memory access
+  // will result in a crash.
+  //
+  while (Info->Header.CurrentEntry <= Info->Header.EndEntry) {
+    Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+    VmgSetOffsetValid (Ghcb, GhcbSwScratch);
+
+    Status = VmgExit (Ghcb, SVM_EXIT_SNP_PAGE_STATE_CHANGE, 0, 0);
+
+    //
+    // The Page State Change VMGEXIT can pass the failure through
+    // ExitInfo2. Let's check both the return value and ExitInfo2.
+    //
+    if ((Status != 0) || (Ghcb->SaveArea.SwExitInfo2)) {
+      SnpPageStateFailureTerminate ();
+    }
+  }
+}
+
+/**
+  This function is used to set the page state when SEV-SNP is active. The page
+  state transition consists of changing the page ownership in the RMP table,
+  and using the PVALIDATE instruction to update the Validated bit in the RMP
+  table.
+
+  When UseLargeEntry is set to TRUE, the function will try to use the large
+  RMP entry (wherever possible).
+ */
+VOID
+InternalSetPageState (
+  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN                 NumPages,
+  IN SEV_SNP_PAGE_STATE    State,
+  IN BOOLEAN               UseLargeEntry
+  )
+{
+  GHCB                        *Ghcb;
+  EFI_PHYSICAL_ADDRESS        NextAddress, EndAddress;
+  MSR_SEV_ES_GHCB_REGISTER    Msr;
+  BOOLEAN                     InterruptState;
+  SNP_PAGE_STATE_CHANGE_INFO  *Info;
+
+  Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+  Ghcb                    = Msr.Ghcb;
+
+  EndAddress = BaseAddress + EFI_PAGES_TO_SIZE (NumPages);
+
+  DEBUG ((
+    DEBUG_VERBOSE,
+    "%a:%a Address 0x%Lx - 0x%Lx State = %a LargeEntry = %d\n",
+    gEfiCallerBaseName,
+    __FUNCTION__,
+    BaseAddress,
+    EndAddress,
+    State == SevSnpPageShared ? "Shared" : "Private",
+    UseLargeEntry
+    ));
+
+  while (BaseAddress < EndAddress) {
+    UINTN  CurrentEntry, EndEntry;
+
+    //
+    // Initialize the GHCB
+    //
+    VmgInit (Ghcb, &InterruptState);
+
+    //
+    // Build the page state structure
+    //
+    Info        = (SNP_PAGE_STATE_CHANGE_INFO *)Ghcb->SharedBuffer;
+    NextAddress = BuildPageStateBuffer (
+                    BaseAddress,
+                    EndAddress,
+                    State,
+                    UseLargeEntry,
+                    Info
+                    );
+
+    //
+    // Save the current and end entry from the page state structure. We need
+    // it later.
+    //
+    CurrentEntry = Info->Header.CurrentEntry;
+    EndEntry     = Info->Header.EndEntry;
+
+    //
+    // If the caller requested to change the page state to shared then
+    // invalidate the pages before making the page shared in the RMP table.
+    //
+    if (State == SevSnpPageShared) {
+      PvalidateRange (Info, CurrentEntry, EndEntry, FALSE);
+    }
+
+    //
+    // Invoke the page state change VMGEXIT.
+    //
+    PageStateChangeVmgExit (Ghcb, Info);
+
+    //
+    // If the caller requested to change the page state to private then
+    // validate the pages after they have been added in the RMP table.
+    //
+    if (State == SevSnpPagePrivate) {
+      PvalidateRange (Info, CurrentEntry, EndEntry, TRUE);
+    }
+
+    VmgDone (Ghcb, InterruptState);
+
+    BaseAddress = NextAddress;
+  }
+}
diff --git a/OvmfPkg/OvmfPkgIa32.dsc b/OvmfPkg/OvmfPkgIa32.dsc
index 6a5be97c05..1dc069e424 100644
--- a/OvmfPkg/OvmfPkgIa32.dsc
+++ b/OvmfPkg/OvmfPkgIa32.dsc
@@ -266,6 +266,7 @@
 !else
   CpuExceptionHandlerLib|UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuExceptionHandlerLib.inf
 !endif
+  MemEncryptSevLib|OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
 
 [LibraryClasses.common.PEI_CORE]
   HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf
diff --git a/OvmfPkg/OvmfPkgIa32X64.dsc b/OvmfPkg/OvmfPkgIa32X64.dsc
index 13d9a1f111..a766457e6b 100644
--- a/OvmfPkg/OvmfPkgIa32X64.dsc
+++ b/OvmfPkg/OvmfPkgIa32X64.dsc
@@ -270,6 +270,7 @@
 !else
   CpuExceptionHandlerLib|UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuExceptionHandlerLib.inf
 !endif
+  MemEncryptSevLib|OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
 
 [LibraryClasses.common.PEI_CORE]
   HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf
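
For illustration only, not part of the patch above: a minimal sketch of how SEC or
PEI platform code might invoke the new MemEncryptSevSnpPreValidateSystemRam() API
once a range of system RAM has been detected. The PreValidateDetectedRam() wrapper,
its parameters, and the example range are hypothetical; only
MemEncryptSevSnpIsEnabled() and MemEncryptSevSnpPreValidateSystemRam() come from
MemEncryptSevLib.

  #include <Uefi/UefiBaseType.h>
  #include <Library/MemEncryptSevLib.h>

  //
  // Hypothetical helper: pre-validate a detected range of system RAM so that
  // an SEV-SNP guest can access it without hitting an unvalidated private page.
  //
  STATIC
  VOID
  PreValidateDetectedRam (
    IN EFI_PHYSICAL_ADDRESS  Base,     // start of the detected RAM range
    IN UINT64                Length    // size of the detected RAM range, in bytes
    )
  {
    //
    // Only SEV-SNP guests need to validate their private memory; skip the
    // library call otherwise.
    //
    if (!MemEncryptSevSnpIsEnabled ()) {
      return;
    }

    //
    // The library uses 2MB RMP entries where alignment allows and falls back
    // to 4KB PVALIDATE operations when the hypervisor reports SIZEMISMATCH.
    //
    MemEncryptSevSnpPreValidateSystemRam (Base, EFI_SIZE_TO_PAGES ((UINTN)Length));
  }

  //
  // Example call for a hypothetical 128MB range discovered at 16MB:
  //   PreValidateDetectedRam (SIZE_16MB, SIZE_128MB);
  //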