--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>\r
+\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#ifndef PEILESS_STARTUP_LIB_H_\r
+#define PEILESS_STARTUP_LIB_H_\r
+\r
+#include <Library/BaseLib.h>\r
+#include <Uefi/UefiBaseType.h>\r
+#include <Uefi/UefiSpec.h>\r
+#include <Pi/PiPeiCis.h>\r
+#include <Library/DebugLib.h>\r
+#include <Protocol/DebugSupport.h>\r
+\r
+/**\r
+ * This function brings up the Tdx guest from SEC phase to DXE phase.\r
+ * PEI phase is skipped because most of the components in PEI phase\r
+ * are not needed for Tdx guest, for example, MP Services, TPM etc.\r
+ * In this way, the attack surfaces are reduced as much as possible.\r
+ *\r
+ * @param Context The pointer to the SecCoreData\r
+ * @return VOID This function never returns\r
+ */\r
+VOID\r
+EFIAPI\r
+PeilessStartup (\r
+ IN VOID *Context\r
+ );\r
+\r
+#endif\r
--- /dev/null
+/** @file\r
+ Responsibility of this file is to load the DXE Core from a Firmware Volume.\r
+\r
+Copyright (c) 2016 HP Development Company, L.P.\r
+Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#include "PeilessStartupInternal.h"\r
+#include <Library/DebugLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/PcdLib.h>\r
+#include <Guid/MemoryTypeInformation.h>\r
+#include <Guid/MemoryAllocationHob.h>\r
+#include <Guid/PcdDataBaseSignatureGuid.h>\r
+#include <Register/Intel/Cpuid.h>\r
+#include <Library/PrePiLib.h>\r
+#include "X64/PageTables.h"\r
+#include <Library/ReportStatusCodeLib.h>\r
+\r
+#define STACK_SIZE 0x20000\r
+\r
+/**\r
+  Transfers control to DxeCore.\r
+\r
+  This function performs CPU architecture specific operations to execute\r
+  the entry point of DxeCore: it allocates a permanent stack, builds\r
+  identity mapping page tables and loads them into CR3, then switches to\r
+  the new stack and jumps to DxeCore. This function does not return.\r
+\r
+  @param DxeCoreEntryPoint         The entry point of DxeCore.\r
+\r
+**/\r
+VOID\r
+HandOffToDxeCore (\r
+  IN EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint\r
+  )\r
+{\r
+  VOID   *BaseOfStack;\r
+  VOID   *TopOfStack;\r
+  UINTN  PageTables;\r
+\r
+  //\r
+  // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.\r
+  //\r
+  if (IsNullDetectionEnabled ()) {\r
+    ClearFirst4KPage (GetHobList ());\r
+    BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);\r
+  }\r
+\r
+  //\r
+  // Allocate 128KB for the Stack (STACK_SIZE = 0x20000)\r
+  //\r
+  BaseOfStack = AllocatePages (EFI_SIZE_TO_PAGES (STACK_SIZE));\r
+  ASSERT (BaseOfStack != NULL);\r
+\r
+  //\r
+  // Compute the top of the stack we were allocated. Pre-allocate a UINTN\r
+  // for safety.\r
+  //\r
+  TopOfStack = (VOID *)((UINTN)BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT);\r
+  TopOfStack = ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);\r
+\r
+  DEBUG ((DEBUG_INFO, "BaseOfStack=0x%x, TopOfStack=0x%x\n", BaseOfStack, TopOfStack));\r
+\r
+  //\r
+  // Create page table and save PageMapLevel4 to CR3\r
+  //\r
+  PageTables = CreateIdentityMappingPageTables (\r
+                 (EFI_PHYSICAL_ADDRESS)(UINTN)BaseOfStack,\r
+                 STACK_SIZE\r
+                 );\r
+  if (PageTables == 0) {\r
+    DEBUG ((DEBUG_ERROR, "Failed to create identity mapping page tables.\n"));\r
+    CpuDeadLoop ();\r
+  }\r
+\r
+  AsmWriteCr3 (PageTables);\r
+\r
+  //\r
+  // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.\r
+  //\r
+  UpdateStackHob ((EFI_PHYSICAL_ADDRESS)(UINTN)BaseOfStack, STACK_SIZE);\r
+\r
+  DEBUG ((DEBUG_INFO, "SwitchStack then Jump to DxeCore\n"));\r
+  //\r
+  // Transfer the control to the entry point of DxeCore.\r
+  //\r
+  SwitchStack (\r
+    (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,\r
+    GetHobList (),\r
+    NULL,\r
+    TopOfStack\r
+    );\r
+}\r
+\r
+/**\r
+  Find the DxeCore file in the given firmware volume.\r
+\r
+  Only the firmware volume selected by FvInstance is searched (not all\r
+  FVs). The FV is expected to contain a FIRMWARE_VOLUME_IMAGE file; that\r
+  file is processed/decompressed (FfsProcessFvFile) and then all\r
+  registered FVs are searched for the first DXE_CORE file.\r
+\r
+  @param[in]      FvInstance  Index of the firmware volume to try.\r
+  @param[in, out] FileHandle  On success, the file handle of DxeCore.\r
+\r
+  @retval EFI_SUCCESS            DxeCore was found.\r
+  @retval EFI_INVALID_PARAMETER  FileHandle is NULL.\r
+  @retval Others                 Error returned by the FFS search services.\r
+\r
+**/\r
+EFI_STATUS\r
+FindDxeCore (\r
+  IN INTN                     FvInstance,\r
+  IN OUT EFI_PEI_FILE_HANDLE  *FileHandle\r
+  )\r
+{\r
+  EFI_STATUS         Status;\r
+  EFI_PEI_FV_HANDLE  VolumeHandle;\r
+\r
+  if (FileHandle == NULL) {\r
+    ASSERT (FALSE);\r
+    return EFI_INVALID_PARAMETER;\r
+  }\r
+\r
+  *FileHandle = NULL;\r
+\r
+  //\r
+  // Caller passed in a specific FV to try, so only try that one\r
+  //\r
+  Status = FfsFindNextVolume (FvInstance, &VolumeHandle);\r
+  if (!EFI_ERROR (Status)) {\r
+    Status = FfsFindNextFile (EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE, VolumeHandle, FileHandle);\r
+    if (*FileHandle) {\r
+      // Assume the FV that contains multiple compressed FVs.\r
+      // So decompress the compressed FVs\r
+      Status = FfsProcessFvFile (*FileHandle);\r
+      ASSERT_EFI_ERROR (Status);\r
+      Status = FfsAnyFvFindFirstFile (EFI_FV_FILETYPE_DXE_CORE, &VolumeHandle, FileHandle);\r
+    }\r
+  }\r
+\r
+  return Status;\r
+}\r
+\r
+/**\r
+  This function finds DXE Core in the firmware volume and transfers the\r
+  control to DXE core. On the success path this function does not return.\r
+\r
+  @param[in] FvInstance  Index of the firmware volume holding DxeCore.\r
+\r
+  @return EFI_SUCCESS          DXE core was successfully loaded.\r
+  @return EFI_OUT_OF_RESOURCES There are not enough resources to load DXE core.\r
+  @return Others               Error returned while locating/loading DxeCore.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+DxeLoadCore (\r
+  IN INTN  FvInstance\r
+  )\r
+{\r
+  EFI_STATUS            Status;\r
+  EFI_FV_FILE_INFO      DxeCoreFileInfo;\r
+  EFI_PHYSICAL_ADDRESS  DxeCoreAddress;\r
+  UINT64                DxeCoreSize;\r
+  EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint;\r
+  EFI_PEI_FILE_HANDLE   FileHandle;\r
+  VOID                  *PeCoffImage;\r
+\r
+  //\r
+  // Look in all the FVs present and find the DXE Core FileHandle\r
+  //\r
+  Status = FindDxeCore (FvInstance, &FileHandle);\r
+\r
+  if (EFI_ERROR (Status)) {\r
+    ASSERT (FALSE);\r
+    return Status;\r
+  }\r
+\r
+  //\r
+  // Load the DXE Core from a Firmware Volume.\r
+  //\r
+  Status = FfsFindSectionData (EFI_SECTION_PE32, FileHandle, &PeCoffImage);\r
+  if (EFI_ERROR (Status)) {\r
+    return Status;\r
+  }\r
+\r
+  Status = LoadPeCoffImage (PeCoffImage, &DxeCoreAddress, &DxeCoreSize, &DxeCoreEntryPoint);\r
+  ASSERT_EFI_ERROR (Status);\r
+  if (EFI_ERROR (Status)) {\r
+    //\r
+    // Do not hand off to a bogus entry point if the image failed to\r
+    // load (the ASSERT above is compiled out in RELEASE builds).\r
+    //\r
+    return Status;\r
+  }\r
+\r
+  //\r
+  // Extract the DxeCore GUID file name.\r
+  //\r
+  Status = FfsGetFileInfo (FileHandle, &DxeCoreFileInfo);\r
+  ASSERT_EFI_ERROR (Status);\r
+\r
+  //\r
+  // Add HOB for the DXE Core\r
+  //\r
+  BuildModuleHob (\r
+    &DxeCoreFileInfo.FileName,\r
+    DxeCoreAddress,\r
+    ALIGN_VALUE (DxeCoreSize, EFI_PAGE_SIZE),\r
+    DxeCoreEntryPoint\r
+    );\r
+\r
+  DEBUG ((\r
+    DEBUG_INFO | DEBUG_LOAD,\r
+    "Loading DXE CORE at 0x%11p EntryPoint=0x%11p\n",\r
+    (VOID *)(UINTN)DxeCoreAddress,\r
+    FUNCTION_ENTRY_POINT (DxeCoreEntryPoint)\r
+    ));\r
+\r
+  //\r
+  // Transfer control to the DXE Core\r
+  // The hand off state is simply a pointer to the HOB list\r
+  //\r
+  HandOffToDxeCore (DxeCoreEntryPoint);\r
+\r
+  //\r
+  // If we get here, then the DXE Core returned. This is an error\r
+  // DxeCore should not return.\r
+  //\r
+  ASSERT (FALSE);\r
+  CpuDeadLoop ();\r
+\r
+  return EFI_OUT_OF_RESOURCES;\r
+}\r
--- /dev/null
+/** @file\r
+ Main SEC phase code. Handles initial TDX Hob List Processing\r
+\r
+ Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>\r
+ (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>\r
+\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#include <PiPei.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/HobLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/PciLib.h>\r
+#include <Library/PrePiLib.h>\r
+#include <Library/QemuFwCfgLib.h>\r
+#include <IndustryStandard/Tdx.h>\r
+#include <IndustryStandard/UefiTcgPlatform.h>\r
+#include <Library/PlatformInitLib.h>\r
+#include <OvmfPlatforms.h>\r
+#include "PeilessStartupInternal.h"\r
+\r
+/**\r
+ * Construct the HobList in SEC phase (non-TDX boot path).\r
+ *\r
+ * The HOB region is placed in low memory right after the DXEFV so that\r
+ * the firmware volumes are not overwritten.\r
+ *\r
+ * @return EFI_SUCCESS Successfully construct the firmware hoblist.\r
+ * @return EFI_NOT_FOUND Cannot find a memory region to be the fw hoblist.\r
+ */\r
+EFI_STATUS\r
+EFIAPI\r
+ConstructSecHobList (\r
+  VOID\r
+  )\r
+{\r
+  UINT32  LowMemorySize;\r
+  UINT32  LowMemoryStart;\r
+\r
+  EFI_HOB_HANDOFF_INFO_TABLE  *HobList;\r
+  EFI_HOB_PLATFORM_INFO       PlatformInfoHob;\r
+\r
+  ZeroMem (&PlatformInfoHob, sizeof (PlatformInfoHob));\r
+  PlatformInfoHob.HostBridgeDevId = PciRead16 (OVMF_HOSTBRIDGE_DID);\r
+  LowMemorySize = PlatformGetSystemMemorySizeBelow4gb (&PlatformInfoHob);\r
+  ASSERT (LowMemorySize != 0);\r
+  LowMemoryStart = FixedPcdGet32 (PcdOvmfDxeMemFvBase) + FixedPcdGet32 (PcdOvmfDxeMemFvSize);\r
+  LowMemorySize -= LowMemoryStart;\r
+\r
+  DEBUG ((DEBUG_INFO, "LowMemory Start and End: %x, %x\n", LowMemoryStart, LowMemoryStart + LowMemorySize));\r
+  HobList = HobConstructor (\r
+              (VOID *)(UINTN)LowMemoryStart,\r
+              LowMemorySize,\r
+              (VOID *)(UINTN)LowMemoryStart,\r
+              (VOID *)(UINTN)(LowMemoryStart + LowMemorySize)\r
+              );\r
+\r
+  //\r
+  // HobList is already a pointer; no integer round-trip cast needed.\r
+  //\r
+  SetHobList ((VOID *)HobList);\r
+\r
+  return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ * This function is to find a memory region which is the largest one below 4GB.\r
+ * It will be used as the firmware hoblist.\r
+ *\r
+ * @param VmmHobList Vmm passed hoblist which contains the memory information.\r
+ * @return EFI_SUCCESS Successfully construct the firmware hoblist.\r
+ * @return EFI_NOT_FOUND Cannot find a memory region to be the fw hoblist.\r
+ */\r
+EFI_STATUS\r
+EFIAPI\r
+ConstructFwHobList (\r
+  IN CONST VOID  *VmmHobList\r
+  )\r
+{\r
+  EFI_PEI_HOB_POINTERS  Hob;\r
+  EFI_PHYSICAL_ADDRESS  PhysicalEnd;\r
+  UINT64                ResourceLength;\r
+  EFI_PHYSICAL_ADDRESS  LowMemoryStart;\r
+  UINT64                LowMemoryLength;\r
+\r
+  ASSERT (VmmHobList != NULL);\r
+\r
+  Hob.Raw = (UINT8 *)VmmHobList;\r
+\r
+  LowMemoryLength = 0;\r
+  LowMemoryStart  = 0;\r
+\r
+  //\r
+  // Parse the HOB list until end of list or matching type is found.\r
+  //\r
+  while (!END_OF_HOB_LIST (Hob)) {\r
+    if (Hob.Header->HobType == EFI_HOB_TYPE_RESOURCE_DESCRIPTOR) {\r
+      if (Hob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) {\r
+        PhysicalEnd    = Hob.ResourceDescriptor->PhysicalStart + Hob.ResourceDescriptor->ResourceLength;\r
+        ResourceLength = Hob.ResourceDescriptor->ResourceLength;\r
+\r
+        if (PhysicalEnd <= BASE_4GB) {\r
+          //\r
+          // Track the largest system-memory region below 4GB.\r
+          //\r
+          if (ResourceLength > LowMemoryLength) {\r
+            LowMemoryStart  = Hob.ResourceDescriptor->PhysicalStart;\r
+            LowMemoryLength = ResourceLength;\r
+          }\r
+        } else {\r
+          break;\r
+        }\r
+      }\r
+    }\r
+\r
+    Hob.Raw = GET_NEXT_HOB (Hob);\r
+  }\r
+\r
+  if (LowMemoryLength == 0) {\r
+    DEBUG ((DEBUG_ERROR, "Cannot find a memory region under 4GB for Fw hoblist.\n"));\r
+    return EFI_NOT_FOUND;\r
+  }\r
+\r
+  //\r
+  // HobLib doesn't like HobStart at address 0 so adjust is needed\r
+  //\r
+  if (LowMemoryStart == 0) {\r
+    LowMemoryStart  += EFI_PAGE_SIZE;\r
+    LowMemoryLength -= EFI_PAGE_SIZE;\r
+  }\r
+\r
+  //\r
+  // LowMemoryStart/Length are 64-bit values, so print with %lx (plain %x\r
+  // would only show the low 32 bits).\r
+  //\r
+  DEBUG ((DEBUG_INFO, "LowMemory Start and End: %lx, %lx\n", LowMemoryStart, LowMemoryStart + LowMemoryLength));\r
+  HobConstructor (\r
+    (VOID *)LowMemoryStart,\r
+    LowMemoryLength,\r
+    (VOID *)LowMemoryStart,\r
+    (VOID *)(LowMemoryStart + LowMemoryLength)\r
+    );\r
+\r
+  SetHobList ((VOID *)(UINT64)LowMemoryStart);\r
+\r
+  return EFI_SUCCESS;\r
+}\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>\r
+\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#include <PiPei.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Protocol/DebugSupport.h>\r
+#include <Library/TdxLib.h>\r
+#include <IndustryStandard/Tdx.h>\r
+#include <Library/PrePiLib.h>\r
+#include <Library/PeilessStartupLib.h>\r
+#include <Library/PlatformInitLib.h>\r
+#include <ConfidentialComputingGuestAttr.h>\r
+#include <Guid/MemoryTypeInformation.h>\r
+#include <OvmfPlatforms.h>\r
+#include "PeilessStartupInternal.h"\r
+\r
+//\r
+// Extract the GPAW field (low 6 bits) from the TDINFO Gpaw value.\r
+//\r
+#define GET_GPAW_INIT_STATE(INFO)  ((UINT8) ((INFO) & 0x3f))\r
+\r
+//\r
+// Default page counts per memory type; published to DXE via the\r
+// gEfiMemoryTypeInformationGuid HOB built in InitializePlatform().\r
+//\r
+EFI_MEMORY_TYPE_INFORMATION  mDefaultMemoryTypeInformation[] = {\r
+  { EfiACPIMemoryNVS,       0x004 },\r
+  { EfiACPIReclaimMemory,   0x008 },\r
+  { EfiReservedMemoryType,  0x004 },\r
+  { EfiRuntimeServicesData, 0x024 },\r
+  { EfiRuntimeServicesCode, 0x030 },\r
+  { EfiBootServicesCode,    0x180 },\r
+  { EfiBootServicesData,    0xF00 },\r
+  { EfiMaxMemoryType,       0x000 }\r
+};\r
+\r
+/**\r
+  Initialize the platform for Pei-less boot: detect the host bridge,\r
+  size the address space and CPU count, publish RAM regions (TDX or\r
+  plain QEMU path), and record the results in PlatformInfoHob.\r
+\r
+  @param PlatformInfoHob  Pointer to the platform info structure to fill in.\r
+\r
+  @return EFI_SUCCESS     Platform was initialized.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+InitializePlatform (\r
+  EFI_HOB_PLATFORM_INFO  *PlatformInfoHob\r
+  )\r
+{\r
+  UINT32  LowerMemorySize;\r
+\r
+  DEBUG ((DEBUG_INFO, "InitializePlatform in Pei-less boot\n"));\r
+  PlatformDebugDumpCmos ();\r
+\r
+  PlatformInfoHob->DefaultMaxCpuNumber = 64;\r
+  PlatformInfoHob->PcdPciMmio64Size    = 0x800000000;\r
+\r
+  PlatformInfoHob->HostBridgeDevId = PciRead16 (OVMF_HOSTBRIDGE_DID);\r
+  DEBUG ((DEBUG_INFO, "HostBridgeDeviceId = 0x%x\n", PlatformInfoHob->HostBridgeDevId));\r
+\r
+  //\r
+  // Determine the physical address width and the 64-bit PCI MMIO window.\r
+  //\r
+  PlatformAddressWidthInitialization (PlatformInfoHob);\r
+  DEBUG ((\r
+    DEBUG_INFO,\r
+    "PhysMemAddressWidth=0x%x, Pci64Base=0x%llx, Pci64Size=0x%llx\n",\r
+    PlatformInfoHob->PhysMemAddressWidth,\r
+    PlatformInfoHob->PcdPciMmio64Base,\r
+    PlatformInfoHob->PcdPciMmio64Size\r
+    ));\r
+\r
+  PlatformMaxCpuCountInitialization (PlatformInfoHob);\r
+  DEBUG ((\r
+    DEBUG_INFO,\r
+    "MaxCpuCount=%d, BootCpuCount=%d\n",\r
+    PlatformInfoHob->PcdCpuMaxLogicalProcessorNumber,\r
+    PlatformInfoHob->PcdCpuBootLogicalProcessorNumber\r
+    ));\r
+\r
+  LowerMemorySize = PlatformGetSystemMemorySizeBelow4gb (PlatformInfoHob);\r
+  PlatformQemuUc32BaseInitialization (PlatformInfoHob);\r
+  DEBUG ((\r
+    DEBUG_INFO,\r
+    "Uc32Base = 0x%x, Uc32Size = 0x%x, LowerMemorySize = 0x%x\n",\r
+    PlatformInfoHob->Uc32Base,\r
+    PlatformInfoHob->Uc32Size,\r
+    LowerMemorySize\r
+    ));\r
+\r
+  //\r
+  // Publish system RAM: TDX guests go through PlatformTdxPublishRamRegions,\r
+  // other guests through the regular QEMU RAM initialization (incl. S3).\r
+  //\r
+  if (TdIsEnabled ()) {\r
+    PlatformTdxPublishRamRegions ();\r
+  } else {\r
+    PlatformQemuInitializeRam (PlatformInfoHob);\r
+    PlatformQemuInitializeRamForS3 (PlatformInfoHob);\r
+  }\r
+\r
+  //\r
+  // Create Memory Type Information HOB\r
+  //\r
+  BuildGuidDataHob (\r
+    &gEfiMemoryTypeInformationGuid,\r
+    mDefaultMemoryTypeInformation,\r
+    sizeof (mDefaultMemoryTypeInformation)\r
+    );\r
+\r
+  PlatformMemMapInitialization (PlatformInfoHob);\r
+\r
+  PlatformNoexecDxeInitialization (PlatformInfoHob);\r
+\r
+  //\r
+  // Record TDX specific settings (CC attr, shared bit mask, NX stack).\r
+  //\r
+  if (TdIsEnabled ()) {\r
+    PlatformInfoHob->PcdConfidentialComputingGuestAttr = CCAttrIntelTdx;\r
+    PlatformInfoHob->PcdTdxSharedBitMask               = TdSharedPageMask ();\r
+    PlatformInfoHob->PcdSetNxForStack                  = TRUE;\r
+  }\r
+\r
+  PlatformMiscInitialization (PlatformInfoHob);\r
+\r
+  return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ * This function brings up the Tdx guest from SEC phase to DXE phase.\r
+ * PEI phase is skipped because most of the components in PEI phase\r
+ * is not needed for Tdx guest, for example, MP Services, TPM etc.\r
+ * In this way, the attack surfaces are reduced as much as possible.\r
+ *\r
+ * @param Context The pointer to the SecCoreData\r
+ * @return VOID This function never returns\r
+ */\r
+VOID\r
+EFIAPI\r
+PeilessStartup (\r
+ IN VOID *Context\r
+ )\r
+{\r
+ EFI_SEC_PEI_HAND_OFF *SecCoreData;\r
+ EFI_FIRMWARE_VOLUME_HEADER *BootFv;\r
+ EFI_STATUS Status;\r
+ EFI_HOB_PLATFORM_INFO PlatformInfoHob;\r
+ UINT32 DxeCodeBase;\r
+ UINT32 DxeCodeSize;\r
+ TD_RETURN_DATA TdReturnData;\r
+ VOID *VmmHobList;\r
+\r
+ Status = EFI_SUCCESS;\r
+ BootFv = NULL;\r
+ VmmHobList = NULL;\r
+ SecCoreData = (EFI_SEC_PEI_HAND_OFF *)Context;\r
+\r
+ ZeroMem (&PlatformInfoHob, sizeof (PlatformInfoHob));\r
+\r
+ if (TdIsEnabled ()) {\r
+ VmmHobList = (VOID *)(UINTN)FixedPcdGet32 (PcdOvmfSecGhcbBase);\r
+ Status = TdCall (TDCALL_TDINFO, 0, 0, 0, &TdReturnData);\r
+ ASSERT (Status == EFI_SUCCESS);\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "Tdx started with(Hob: 0x%x, Gpaw: 0x%x, Cpus: %d)\n",\r
+ (UINT32)(UINTN)VmmHobList,\r
+ GET_GPAW_INIT_STATE (TdReturnData.TdInfo.Gpaw),\r
+ TdReturnData.TdInfo.NumVcpus\r
+ ));\r
+\r
+ Status = ConstructFwHobList (VmmHobList);\r
+ } else {\r
+ DEBUG ((DEBUG_INFO, "Ovmf started\n"));\r
+ Status = ConstructSecHobList ();\r
+ }\r
+\r
+ if (EFI_ERROR (Status)) {\r
+ ASSERT (FALSE);\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ DEBUG ((DEBUG_INFO, "HobList: %p\n", GetHobList ()));\r
+\r
+ //\r
+ // Initialize the Platform\r
+ //\r
+ Status = InitializePlatform (&PlatformInfoHob);\r
+ if (EFI_ERROR (Status)) {\r
+ ASSERT (FALSE);\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ BuildGuidDataHob (&gUefiOvmfPkgPlatformInfoGuid, &PlatformInfoHob, sizeof (EFI_HOB_PLATFORM_INFO));\r
+\r
+ //\r
+ // SecFV\r
+ //\r
+ BootFv = (EFI_FIRMWARE_VOLUME_HEADER *)SecCoreData->BootFirmwareVolumeBase;\r
+ BuildFvHob ((UINTN)BootFv, BootFv->FvLength);\r
+\r
+ //\r
+ // DxeFV\r
+ //\r
+ DxeCodeBase = PcdGet32 (PcdBfvBase);\r
+ DxeCodeSize = PcdGet32 (PcdBfvRawDataSize) - (UINT32)BootFv->FvLength;\r
+ BuildFvHob (DxeCodeBase, DxeCodeSize);\r
+\r
+ DEBUG ((DEBUG_INFO, "SecFv : %p, 0x%x\n", BootFv, BootFv->FvLength));\r
+ DEBUG ((DEBUG_INFO, "DxeFv : %x, 0x%x\n", DxeCodeBase, DxeCodeSize));\r
+\r
+ BuildStackHob ((UINTN)SecCoreData->StackBase, SecCoreData->StackSize <<= 1);\r
+\r
+ BuildResourceDescriptorHob (\r
+ EFI_RESOURCE_SYSTEM_MEMORY,\r
+ EFI_RESOURCE_ATTRIBUTE_PRESENT |\r
+ EFI_RESOURCE_ATTRIBUTE_INITIALIZED |\r
+ EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE |\r
+ EFI_RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE |\r
+ EFI_RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE |\r
+ EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE |\r
+ EFI_RESOURCE_ATTRIBUTE_TESTED,\r
+ (UINT64)SecCoreData->TemporaryRamBase,\r
+ (UINT64)SecCoreData->TemporaryRamSize\r
+ );\r
+\r
+ //\r
+ // Load the DXE Core and transfer control to it.\r
+ // Only DxeFV is in the compressed section.\r
+ //\r
+ Status = DxeLoadCore (1);\r
+\r
+ //\r
+ // Never arrive here.\r
+ //\r
+ ASSERT (FALSE);\r
+ CpuDeadLoop ();\r
+}\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>\r
+\r
+ SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#ifndef PEILESS_STARTUP_INTERNAL_LIB_H_\r
+#define PEILESS_STARTUP_INTERNAL_LIB_H_\r
+\r
+#include <PiPei.h>\r
+#include <Library/BaseLib.h>\r
+#include <Uefi/UefiSpec.h>\r
+#include <Uefi/UefiBaseType.h>\r
+#include <IndustryStandard/IntelTdx.h>\r
+\r
+EFI_STATUS\r
+EFIAPI\r
+DxeLoadCore (\r
+ IN INTN FvInstance\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+TransferHobList (\r
+ IN CONST VOID *HobStart\r
+ );\r
+\r
+/**\r
+ * This function is to find a memory region which is the largest one below 4GB.\r
+ * It will be used as the firmware hoblist.\r
+ *\r
+ * @param VmmHobList Vmm passed hoblist which contains the memory information.\r
+ * @return EFI_SUCCESS Successfully construct the firmware hoblist.\r
+ * @return EFI_NOT_FOUND Cannot find a memory region to be the fw hoblist.\r
+ */\r
+EFI_STATUS\r
+EFIAPI\r
+ConstructFwHobList (\r
+ IN CONST VOID *VmmHobList\r
+ );\r
+\r
+/**\r
+ * Construct the HobList in SEC phase.\r
+ *\r
+ * @return EFI_SUCCESS Successfully construct the firmware hoblist.\r
+ * @return EFI_NOT_FOUND Cannot find a memory region to be the fw hoblist.\r
+ */\r
+EFI_STATUS\r
+EFIAPI\r
+ConstructSecHobList (\r
+ );\r
+\r
+#endif\r
--- /dev/null
+#/** @file\r
+# Component description file for TDX Pre PI Library\r
+#\r
+# Library helps you build a platform that skips PEI and loads DXE Core\r
+# directly. Helps building HOBs, reading data from the FV, and doing\r
+# decompression.\r
+#\r
+# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r
+# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#\r
+#**/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = PeilessStartupLib\r
+ FILE_GUID = 8FA74135-F841-40A4-86C8-69C923D2E85F\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = PeilessStartupLib|SEC\r
+\r
+#\r
+# VALID_ARCHITECTURES = X64\r
+#\r
+\r
+[Sources]\r
+ PeilessStartup.c\r
+ Hob.c\r
+ DxeLoad.c\r
+\r
+[Sources.X64]\r
+ X64/VirtualMemory.c\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+ MdeModulePkg/MdeModulePkg.dec\r
+ UefiCpuPkg/UefiCpuPkg.dec\r
+ OvmfPkg/OvmfPkg.dec\r
+ EmbeddedPkg/EmbeddedPkg.dec\r
+\r
+[LibraryClasses]\r
+ BaseLib\r
+ DebugLib\r
+ BaseMemoryLib\r
+ PcdLib\r
+ UefiCpuLib\r
+ DebugAgentLib\r
+ IoLib\r
+ LocalApicLib\r
+ SynchronizationLib\r
+ HobLib\r
+ TdxLib\r
+ MemoryAllocationLib\r
+ PrePiLib\r
+ QemuFwCfgLib\r
+ PlatformInitLib\r
+\r
+[Guids]\r
+ gEfiHobMemoryAllocModuleGuid\r
+ gEfiHobMemoryAllocStackGuid\r
+ gUefiOvmfPkgPlatformInfoGuid\r
+ gEfiMemoryTypeInformationGuid\r
+ gPcdDataBaseHobGuid\r
+\r
+[Pcd]\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdCfvBase\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdCfvRawDataOffset\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdCfvRawDataSize\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdBfvBase\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdBfvRawDataOffset\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdBfvRawDataSize\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbSize\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBase\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdDxeIplBuildPageTables ## CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard ## CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdUse1GPageTable ## SOMETIMES_CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdDxeNxMemoryProtectionPolicy ## SOMETIMES_CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdImageProtectionPolicy ## SOMETIMES_CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask ## CONSUMES\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask ## CONSUMES\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfDxeMemFvBase\r
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfDxeMemFvSize\r
--- /dev/null
+/** @file\r
+ x64 Long Mode Virtual Memory Management Definitions\r
+\r
+ References:\r
+ 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel\r
+ 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
+ 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
+ 4) AMD64 Architecture Programmer's Manual Volume 2: System Programming\r
+\r
+Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
+\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
+\r
+**/\r
+\r
+#ifndef PAGE_TABLES_H_\r
+#define PAGE_TABLES_H_\r
+\r
+#define SYS_CODE64_SEL 0x38\r
+\r
+#pragma pack(1)\r
+\r
+typedef union {\r
+ struct {\r
+ UINT32 LimitLow : 16;\r
+ UINT32 BaseLow : 16;\r
+ UINT32 BaseMid : 8;\r
+ UINT32 Type : 4;\r
+ UINT32 System : 1;\r
+ UINT32 Dpl : 2;\r
+ UINT32 Present : 1;\r
+ UINT32 LimitHigh : 4;\r
+ UINT32 Software : 1;\r
+ UINT32 Reserved : 1;\r
+ UINT32 DefaultSize : 1;\r
+ UINT32 Granularity : 1;\r
+ UINT32 BaseHigh : 8;\r
+ } Bits;\r
+ UINT64 Uint64;\r
+} IA32_GDT;\r
+\r
+typedef struct {\r
+ IA32_IDT_GATE_DESCRIPTOR Ia32IdtEntry;\r
+ UINT32 Offset32To63;\r
+ UINT32 Reserved;\r
+} X64_IDT_GATE_DESCRIPTOR;\r
+\r
+//\r
+// Page-Map Level-4 Offset (PML4) and\r
+// Page-Directory-Pointer Offset (PDPE) entries 4K & 2MB\r
+//\r
+\r
+typedef union {\r
+ struct {\r
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory\r
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write\r
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User\r
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching\r
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached\r
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)\r
+ UINT64 Reserved : 1; // Reserved\r
+ UINT64 MustBeZero : 2; // Must Be Zero\r
+ UINT64 Available : 3; // Available for use by system software\r
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address\r
+ UINT64 AvabilableHigh : 11; // Available for use by system software\r
+ UINT64 Nx : 1; // No Execute bit\r
+ } Bits;\r
+ UINT64 Uint64;\r
+} PAGE_MAP_AND_DIRECTORY_POINTER;\r
+\r
+//\r
+// Page Table Entry 4KB\r
+//\r
+typedef union {\r
+ struct {\r
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory\r
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write\r
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User\r
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching\r
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached\r
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)\r
+ UINT64 Dirty : 1; // 0 = Not Dirty, 1 = written by processor on access to page\r
+ UINT64 PAT : 1; //\r
+ UINT64 Global : 1; // 0 = Not global page, 1 = global page TLB not cleared on CR3 write\r
+ UINT64 Available : 3; // Available for use by system software\r
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address\r
+ UINT64 AvabilableHigh : 11; // Available for use by system software\r
+ UINT64 Nx : 1; // 0 = Execute Code, 1 = No Code Execution\r
+ } Bits;\r
+ UINT64 Uint64;\r
+} PAGE_TABLE_4K_ENTRY;\r
+\r
+//\r
+// Page Table Entry 2MB\r
+//\r
+typedef union {\r
+ struct {\r
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory\r
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write\r
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User\r
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching\r
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached\r
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)\r
+ UINT64 Dirty : 1; // 0 = Not Dirty, 1 = written by processor on access to page\r
+ UINT64 MustBe1 : 1; // Must be 1\r
+ UINT64 Global : 1; // 0 = Not global page, 1 = global page TLB not cleared on CR3 write\r
+ UINT64 Available : 3; // Available for use by system software\r
+ UINT64 PAT : 1; //\r
+ UINT64 MustBeZero : 8; // Must be zero;\r
+ UINT64 PageTableBaseAddress : 31; // Page Table Base Address\r
+ UINT64 AvabilableHigh : 11; // Available for use by system software\r
+ UINT64 Nx : 1; // 0 = Execute Code, 1 = No Code Execution\r
+ } Bits;\r
+ UINT64 Uint64;\r
+} PAGE_TABLE_ENTRY;\r
+\r
+//\r
+// Page Table Entry 1GB\r
+//\r
+typedef union {\r
+ struct {\r
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory\r
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write\r
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User\r
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching\r
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached\r
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)\r
+ UINT64 Dirty : 1; // 0 = Not Dirty, 1 = written by processor on access to page\r
+ UINT64 MustBe1 : 1; // Must be 1\r
+ UINT64 Global : 1; // 0 = Not global page, 1 = global page TLB not cleared on CR3 write\r
+ UINT64 Available : 3; // Available for use by system software\r
+ UINT64 PAT : 1; //\r
+ UINT64 MustBeZero : 17; // Must be zero;\r
+ UINT64 PageTableBaseAddress : 22; // Page Table Base Address\r
+ UINT64 AvabilableHigh : 11; // Available for use by system software\r
+ UINT64 Nx : 1; // 0 = Execute Code, 1 = No Code Execution\r
+ } Bits;\r
+ UINT64 Uint64;\r
+} PAGE_TABLE_1G_ENTRY;\r
+\r
+#pragma pack()\r
+\r
+#define CR0_WP BIT16\r
+\r
+#define IA32_PG_P BIT0\r
+#define IA32_PG_RW BIT1\r
+#define IA32_PG_PS BIT7\r
+\r
+#define PAGING_PAE_INDEX_MASK 0x1FF\r
+\r
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull\r
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull\r
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull\r
+\r
+#define PAGING_L1_ADDRESS_SHIFT 12\r
+#define PAGING_L2_ADDRESS_SHIFT 21\r
+#define PAGING_L3_ADDRESS_SHIFT 30\r
+#define PAGING_L4_ADDRESS_SHIFT 39\r
+\r
+#define PAGING_PML4E_NUMBER 4\r
+\r
+#define PAGE_TABLE_POOL_ALIGNMENT BASE_2MB\r
+#define PAGE_TABLE_POOL_UNIT_SIZE SIZE_2MB\r
+#define PAGE_TABLE_POOL_UNIT_PAGES EFI_SIZE_TO_PAGES (PAGE_TABLE_POOL_UNIT_SIZE)\r
+#define PAGE_TABLE_POOL_ALIGN_MASK \\r
+ (~(EFI_PHYSICAL_ADDRESS)(PAGE_TABLE_POOL_ALIGNMENT - 1))\r
+\r
+typedef struct {\r
+ VOID *NextPool;\r
+ UINTN Offset;\r
+ UINTN FreePages;\r
+} PAGE_TABLE_POOL;\r
+\r
+UINTN\r
+CreateIdentityMappingPageTables (\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ );\r
+\r
+/**\r
+ Clear legacy memory located at the first 4K-page.\r
+\r
+ This function traverses the whole HOB list to check if memory from 0 to 4095\r
+ exists and has not been allocated, and then clear it if so.\r
+\r
+ @param HobStart The start of HobList passed to DxeCore.\r
+\r
+**/\r
+VOID\r
+ClearFirst4KPage (\r
+ IN VOID *HobStart\r
+ );\r
+\r
+/**\r
+ Return configure status of NULL pointer detection feature.\r
+\r
+ @return TRUE NULL pointer detection feature is enabled\r
+ @return FALSE NULL pointer detection feature is disabled\r
+**/\r
+BOOLEAN\r
+IsNullDetectionEnabled (\r
+ VOID\r
+ );\r
+\r
+#endif\r
--- /dev/null
+/** @file\r
+  x64-specific functionality for Page Table Setup.\r
+\r
+Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
+**/\r
+\r
+#include <Uefi/UefiBaseType.h>\r
+#include <Uefi/UefiSpec.h>\r
+#include <Pi/PiBootMode.h>\r
+#include <Pi/PiHob.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/HobLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/PcdLib.h>\r
+#include <Guid/MemoryTypeInformation.h>\r
+#include <Guid/MemoryAllocationHob.h>\r
+#include <Register/Intel/Cpuid.h>\r
+#include <Library/PlatformInitLib.h>\r
+#include "PageTables.h"\r
+\r
+//\r
+// Global variable to keep track current available memory used as page table.\r
+//\r
+PAGE_TABLE_POOL  *mPageTablePool = NULL;\r
+\r
+//\r
+// Address shift per paging level (index 1 = PTE ... 4 = PML4E).\r
+//\r
+UINTN  mLevelShift[5] = {\r
+  0,\r
+  PAGING_L1_ADDRESS_SHIFT,\r
+  PAGING_L2_ADDRESS_SHIFT,\r
+  PAGING_L3_ADDRESS_SHIFT,\r
+  PAGING_L4_ADDRESS_SHIFT\r
+};\r
+\r
+//\r
+// Address mask per paging level. NOTE(review): index 4 reuses the 1G\r
+// mask -- confirm this is intentional for level-4 lookups.\r
+//\r
+UINT64  mLevelMask[5] = {\r
+  0,\r
+  PAGING_4K_ADDRESS_MASK_64,\r
+  PAGING_2M_ADDRESS_MASK_64,\r
+  PAGING_1G_ADDRESS_MASK_64,\r
+  PAGING_1G_ADDRESS_MASK_64\r
+};\r
+\r
+//\r
+// Region size mapped by a single entry at each paging level.\r
+//\r
+UINT64  mLevelSize[5] = {\r
+  0,\r
+  SIZE_4KB,\r
+  SIZE_2MB,\r
+  SIZE_1GB,\r
+  SIZE_512GB\r
+};\r
+\r
+/**\r
+  Read the PcdSetNxForStack setting from the platform info GUID HOB.\r
+\r
+  @retval TRUE   NX should be applied to the stack.\r
+  @retval FALSE  NX should not be applied (or the platform info HOB\r
+                 is missing, which is unexpected).\r
+**/\r
+BOOLEAN\r
+IsSetNxForStack (\r
+  VOID\r
+  )\r
+{\r
+  EFI_HOB_GUID_TYPE      *GuidHob;\r
+  EFI_HOB_PLATFORM_INFO  *PlatformInfo;\r
+\r
+  GuidHob = GetFirstGuidHob (&gUefiOvmfPkgPlatformInfoGuid);\r
+  if (GuidHob == NULL) {\r
+    //\r
+    // The platform info HOB is built by PeilessStartup() before DXE\r
+    // handoff; its absence indicates a broken boot flow.\r
+    //\r
+    ASSERT (FALSE);\r
+    return FALSE;\r
+  }\r
+\r
+  PlatformInfo = (EFI_HOB_PLATFORM_INFO *)GET_GUID_HOB_DATA (GuidHob);\r
+\r
+  return PlatformInfo->PcdSetNxForStack;\r
+}\r
+\r
+/**\r
+  Clear legacy memory located at the first 4K-page, if available.\r
+\r
+  This function traverses the whole HOB list to check if memory from 0 to 4095\r
+  exists and has not been allocated, and then clear it if so.\r
+\r
+  @param HobStart The start of HobList passed to DxeCore.\r
+\r
+**/\r
+VOID\r
+ClearFirst4KPage (\r
+  IN VOID  *HobStart\r
+  )\r
+{\r
+  EFI_PEI_HOB_POINTERS  RscHob;\r
+  EFI_PEI_HOB_POINTERS  MemHob;\r
+  BOOLEAN               DoClear;\r
+\r
+  RscHob.Raw = HobStart;\r
+  MemHob.Raw = HobStart;\r
+  DoClear    = FALSE;\r
+\r
+  //\r
+  // Check if page 0 exists and free\r
+  //\r
+  while ((RscHob.Raw = GetNextHob (\r
+                         EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
+                         RscHob.Raw\r
+                         )) != NULL)\r
+  {\r
+    if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&\r
+        (RscHob.ResourceDescriptor->PhysicalStart == 0))\r
+    {\r
+      DoClear = TRUE;\r
+      //\r
+      // Make sure memory at 0-4095 has not been allocated.\r
+      //\r
+      while ((MemHob.Raw = GetNextHob (\r
+                             EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
+                             MemHob.Raw\r
+                             )) != NULL)\r
+      {\r
+        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r
+            < EFI_PAGE_SIZE)\r
+        {\r
+          DoClear = FALSE;\r
+          break;\r
+        }\r
+\r
+        MemHob.Raw = GET_NEXT_HOB (MemHob);\r
+      }\r
+\r
+      break;\r
+    }\r
+\r
+    RscHob.Raw = GET_NEXT_HOB (RscHob);\r
+  }\r
+\r
+  if (DoClear) {\r
+    //\r
+    // Page 0 is present system memory and unallocated: zero it.\r
+    //\r
+    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r
+    SetMem (NULL, EFI_PAGE_SIZE, 0);\r
+  }\r
+\r
+  return;\r
+}\r
+\r
+/**\r
+  Tell whether the NULL pointer detection feature is configured on.\r
+\r
+  @return TRUE   NULL pointer detection feature is enabled\r
+  @return FALSE  NULL pointer detection feature is disabled\r
+\r
+**/\r
+BOOLEAN\r
+IsNullDetectionEnabled (\r
+  VOID\r
+  )\r
+{\r
+  UINT8  PropertyMask;\r
+\r
+  //\r
+  // BIT0 of the property mask PCD turns the feature on.\r
+  //\r
+  PropertyMask = PcdGet8 (PcdNullPointerDetectionPropertyMask);\r
+  return (BOOLEAN)((PropertyMask & BIT0) != 0);\r
+}\r
+\r
+/**\r
+  The function will check if Execute Disable Bit is available.\r
+\r
+  @retval TRUE   Execute Disable Bit is available.\r
+  @retval FALSE  Execute Disable Bit is not available.\r
+\r
+**/\r
+BOOLEAN\r
+IsExecuteDisableBitAvailable (\r
+  VOID\r
+  )\r
+{\r
+  UINT32  MaxExtendedLeaf;\r
+  UINT32  RegEdx;\r
+\r
+  //\r
+  // The XD capability is reported in CPUID leaf 0x80000001, EDX bit 20;\r
+  // verify that leaf exists before querying it.\r
+  //\r
+  AsmCpuid (0x80000000, &MaxExtendedLeaf, NULL, NULL, NULL);\r
+  if (MaxExtendedLeaf < 0x80000001) {\r
+    return FALSE;\r
+  }\r
+\r
+  AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+  return (BOOLEAN)((RegEdx & BIT20) != 0);\r
+}\r
+\r
+/**\r
+ Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.\r
+\r
+ @retval TRUE IA32_EFER.NXE should be enabled.\r
+ @retval FALSE IA32_EFER.NXE should not be enabled.\r
+\r
+**/\r
+BOOLEAN\r
+IsEnableNonExecNeeded (\r
+ VOID\r
+ )\r
+{\r
+ if (!IsExecuteDisableBitAvailable ()) {\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+ // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.\r
+ // Features controlled by Following PCDs need this feature to be enabled.\r
+ //\r
+ return (IsSetNxForStack () ||\r
+ FixedPcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||\r
+ PcdGet32 (PcdImageProtectionPolicy) != 0);\r
+}\r
+\r
+/**\r
+ Enable Execute Disable Bit.\r
+\r
+**/\r
+VOID\r
+EnableExecuteDisableBit (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 MsrRegisters;\r
+\r
+ MsrRegisters = AsmReadMsr64 (0xC0000080);\r
+ MsrRegisters |= BIT11;\r
+ AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
+}\r
+\r
+/**\r
+ The function will check if page table entry should be splitted to smaller\r
+ granularity.\r
+\r
+ @param Address Physical memory address.\r
+ @param Size Size of the given physical memory.\r
+ @param StackBase Base address of stack.\r
+ @param StackSize Size of stack.\r
+\r
+ @retval TRUE Page table should be split.\r
+ @retval FALSE Page table should not be split.\r
+**/\r
+BOOLEAN\r
+ToSplitPageTable (\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN UINTN Size,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ if (IsNullDetectionEnabled () && (Address == 0)) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (FixedPcdGetBool (PcdCpuStackGuard)) {\r
+ if ((StackBase >= Address) && (StackBase < (Address + Size))) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ if (IsSetNxForStack ()) {\r
+ if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ Initialize a buffer pool for page table use only.\r
+\r
+ To reduce the potential split operation on page table, the pages reserved for\r
+ page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and\r
+ at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always\r
+ initialized with number of pages greater than or equal to the given PoolPages.\r
+\r
+ Once the pages in the pool are used up, this method should be called again to\r
+ reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't\r
+ happen in practice.\r
+\r
+ @param PoolPages The least page number of the pool to be created.\r
+\r
+ @retval TRUE The pool is initialized successfully.\r
+ @retval FALSE The memory is out of resource.\r
+**/\r
+BOOLEAN\r
+InitializePageTablePool (\r
+ IN UINTN PoolPages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ DEBUG ((DEBUG_INFO, "InitializePageTablePool PoolPages=%d\n", PoolPages));\r
+\r
+ //\r
+ // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r
+ // header.\r
+ //\r
+ PoolPages += 1; // Add one page for header.\r
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
+ PAGE_TABLE_POOL_UNIT_PAGES;\r
+ Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r
+ if (Buffer == NULL) {\r
+ DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+ // Link all pools into a list for easier track later.\r
+ //\r
+ if (mPageTablePool == NULL) {\r
+ mPageTablePool = Buffer;\r
+ mPageTablePool->NextPool = mPageTablePool;\r
+ } else {\r
+ ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r
+ mPageTablePool->NextPool = Buffer;\r
+ mPageTablePool = Buffer;\r
+ }\r
+\r
+ //\r
+ // Reserve one page for pool header.\r
+ //\r
+ mPageTablePool->FreePages = PoolPages - 1;\r
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
+\r
+ return TRUE;\r
+}\r
+\r
+/**\r
+ This API provides a way to allocate memory for page table.\r
+\r
+ This API can be called more than once to allocate memory for page tables.\r
+\r
+ Allocates the number of 4KB pages and returns a pointer to the allocated\r
+ buffer. The buffer returned is aligned on a 4KB boundary.\r
+\r
+ If Pages is 0, then NULL is returned.\r
+ If there is not enough memory remaining to satisfy the request, then NULL is\r
+ returned.\r
+\r
+ @param Pages The number of 4 KB pages to allocate.\r
+\r
+ @return A pointer to the allocated buffer or NULL if allocation fails.\r
+\r
+**/\r
+VOID *\r
+AllocatePageTableMemory (\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ if (Pages == 0) {\r
+ return NULL;\r
+ }\r
+\r
+ DEBUG ((DEBUG_INFO, "AllocatePageTableMemory. mPageTablePool=%p, Pages=%d\n", mPageTablePool, Pages));\r
+ //\r
+ // Renew the pool if necessary.\r
+ //\r
+ if ((mPageTablePool == NULL) ||\r
+ (Pages > mPageTablePool->FreePages))\r
+ {\r
+ if (!InitializePageTablePool (Pages)) {\r
+ return NULL;\r
+ }\r
+ }\r
+\r
+ Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r
+\r
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
+ mPageTablePool->FreePages -= Pages;\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "%a:%a: Buffer=0x%Lx Pages=%ld\n",\r
+ gEfiCallerBaseName,\r
+ __FUNCTION__,\r
+ Buffer,\r
+ Pages\r
+ ));\r
+\r
+ return Buffer;\r
+}\r
+\r
+/**\r
+ Split 2M page to 4K.\r
+\r
+ @param[in] PhysicalAddress Start physical address the 2M page covered.\r
+ @param[in, out] PageEntry2M Pointer to 2M page entry.\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+**/\r
+VOID\r
+Split2MPageTo4K (\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry2M,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
+ UINTN IndexOfPageTableEntries;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
+\r
+ DEBUG ((DEBUG_INFO, "Split2MPageTo4K\n"));\r
+\r
+ PageTableEntry = AllocatePageTableMemory (1);\r
+\r
+ if (PageTableEntry == NULL) {\r
+ ASSERT (FALSE);\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Fill in 2M page entry.\r
+ //\r
+ *PageEntry2M = (UINT64)(UINTN)PageTableEntry | IA32_PG_P | IA32_PG_RW;\r
+\r
+ PhysicalAddress4K = PhysicalAddress;\r
+ for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
+ //\r
+ // Fill in the Page Table entries\r
+ //\r
+ PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;\r
+ PageTableEntry->Bits.ReadWrite = 1;\r
+\r
+ if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||\r
+ (FixedPcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))\r
+ {\r
+ PageTableEntry->Bits.Present = 0;\r
+ } else {\r
+ PageTableEntry->Bits.Present = 1;\r
+ }\r
+\r
+ if ( IsSetNxForStack ()\r
+ && (PhysicalAddress4K >= StackBase)\r
+ && (PhysicalAddress4K < StackBase + StackSize))\r
+ {\r
+ //\r
+ // Set Nx bit for stack.\r
+ //\r
+ PageTableEntry->Bits.Nx = 1;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Split 1G page to 2M.\r
+\r
+ @param[in] PhysicalAddress Start physical address the 1G page covered.\r
+ @param[in, out] PageEntry1G Pointer to 1G page entry.\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+**/\r
+VOID\r
+Split1GPageTo2M (\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry1G,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+\r
+ PageDirectoryEntry = AllocatePageTableMemory (1);\r
+\r
+ if (PageDirectoryEntry == NULL) {\r
+ ASSERT (FALSE);\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Fill in 1G page entry.\r
+ //\r
+ *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | IA32_PG_P | IA32_PG_RW;\r
+\r
+ PhysicalAddress2M = PhysicalAddress;\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
+ if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {\r
+ //\r
+ // Need to split this 2M page that covers NULL or stack range.\r
+ //\r
+ Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Set one page of page table pool memory to be read-only.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Address Start address of a page to be set as read-only.\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+SetPageTablePoolReadOnly (\r
+ IN UINTN PageTableBase,\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN EntryIndex;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
+ UINT64 *PageTable;\r
+ UINT64 *NewPageTable;\r
+ UINT64 PageAttr;\r
+ UINTN Level;\r
+ UINT64 PoolUnitSize;\r
+\r
+ if (PageTableBase == 0) {\r
+ ASSERT (FALSE);\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Since the page table is always from page table pool, which is always\r
+ // located at the boundary of PcdPageTablePoolAlignment, we just need to\r
+ // set the whole pool unit to be read-only.\r
+ //\r
+ Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;\r
+\r
+ PageTable = (UINT64 *)(UINTN)PageTableBase;\r
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;\r
+\r
+ for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {\r
+ Index = ((UINTN)RShiftU64 (Address, mLevelShift[Level]));\r
+ Index &= PAGING_PAE_INDEX_MASK;\r
+\r
+ PageAttr = PageTable[Index];\r
+ if ((PageAttr & IA32_PG_PS) == 0) {\r
+ //\r
+ // Go to next level of table.\r
+ //\r
+ PageTable = (UINT64 *)(UINTN)(PageAttr & PAGING_4K_ADDRESS_MASK_64);\r
+ continue;\r
+ }\r
+\r
+ if (PoolUnitSize >= mLevelSize[Level]) {\r
+ //\r
+ // Clear R/W bit if current page granularity is not larger than pool unit\r
+ // size.\r
+ //\r
+ if ((PageAttr & IA32_PG_RW) != 0) {\r
+ while (PoolUnitSize > 0) {\r
+ //\r
+ // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in\r
+ // one page (2MB). Then we don't need to update attributes for pages\r
+ // crossing page directory. ASSERT below is for that purpose.\r
+ //\r
+ ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));\r
+\r
+ PageTable[Index] &= ~(UINT64)IA32_PG_RW;\r
+ PoolUnitSize -= mLevelSize[Level];\r
+\r
+ ++Index;\r
+ }\r
+ }\r
+\r
+ break;\r
+ } else {\r
+ //\r
+ // The smaller granularity of page must be needed.\r
+ //\r
+ ASSERT (Level > 1);\r
+\r
+ NewPageTable = AllocatePageTableMemory (1);\r
+\r
+ if (NewPageTable == NULL) {\r
+ ASSERT (FALSE);\r
+ return;\r
+ }\r
+\r
+ PhysicalAddress = PageAttr & mLevelMask[Level];\r
+ for (EntryIndex = 0;\r
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);\r
+ ++EntryIndex)\r
+ {\r
+ NewPageTable[EntryIndex] = PhysicalAddress |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ if (Level > 2) {\r
+ NewPageTable[EntryIndex] |= IA32_PG_PS;\r
+ }\r
+\r
+ PhysicalAddress += mLevelSize[Level - 1];\r
+ }\r
+\r
+ PageTable[Index] = (UINT64)(UINTN)NewPageTable |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ PageTable = NewPageTable;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Prevent the memory pages used for page table from been overwritten.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+EnablePageTableProtection (\r
+ IN UINTN PageTableBase,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ PAGE_TABLE_POOL *HeadPool;\r
+ PAGE_TABLE_POOL *Pool;\r
+ UINT64 PoolSize;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+\r
+ DEBUG ((DEBUG_INFO, "EnablePageTableProtection\n"));\r
+\r
+ if (mPageTablePool == NULL) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Disable write protection, because we need to mark page table to be write\r
+ // protected.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);\r
+\r
+ //\r
+ // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to\r
+ // remember original one in advance.\r
+ //\r
+ HeadPool = mPageTablePool;\r
+ Pool = HeadPool;\r
+ do {\r
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;\r
+ PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);\r
+\r
+ //\r
+ // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE, which\r
+ // is one of page size of the processor (2MB by default). Let's apply the\r
+ // protection to them one by one.\r
+ //\r
+ while (PoolSize > 0) {\r
+ SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);\r
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;\r
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r
+ }\r
+\r
+ Pool = Pool->NextPool;\r
+ } while (Pool != HeadPool);\r
+\r
+ //\r
+ // Enable write protection, after page table attribute updated.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () | CR0_WP);\r
+}\r
+\r
+/**\r
+ Allocates and fills in the Page Directory and Page Table Entries to\r
+ establish a 1:1 Virtual to Physical mapping.\r
+\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+ @return The address of 4 level page map.\r
+\r
+**/\r
+UINTN\r
+CreateIdentityMappingPageTables (\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+ UINT8 PhysicalAddressBits;\r
+ EFI_PHYSICAL_ADDRESS PageAddress;\r
+ UINTN IndexOfPml5Entries;\r
+ UINTN IndexOfPml4Entries;\r
+ UINTN IndexOfPdpEntries;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ UINT32 NumberOfPml5EntriesNeeded;\r
+ UINT32 NumberOfPml4EntriesNeeded;\r
+ UINT32 NumberOfPdpEntriesNeeded;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINTN TotalPagesNum;\r
+ UINTN BigPageAddress;\r
+ VOID *Hob;\r
+ BOOLEAN Page5LevelSupport;\r
+ BOOLEAN Page1GSupport;\r
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
+ IA32_CR4 Cr4;\r
+\r
+ //\r
+ // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings\r
+ //\r
+ PageMapLevel5Entry = NULL;\r
+\r
+ Page1GSupport = FALSE;\r
+ if (FixedPcdGetBool (PcdUse1GPageTable)) {\r
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000001) {\r
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT26) != 0) {\r
+ Page1GSupport = TRUE;\r
+ }\r
+ }\r
+ }\r
+\r
+ //\r
+ // Get physical address bits supported.\r
+ //\r
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
+ if (Hob == NULL) {\r
+ ASSERT (FALSE);\r
+ return 0;\r
+ }\r
+\r
+ PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;\r
+\r
+ //\r
+ // CPU will already have LA57 enabled so just check CR4\r
+ Cr4.UintN = AsmReadCr4 ();\r
+ Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "AddressBits=%u 5LevelPaging=%u 1GPage=%u \n",\r
+ PhysicalAddressBits,\r
+ Page5LevelSupport,\r
+ Page1GSupport\r
+ ));\r
+\r
+ //\r
+ // Calculate the table entries needed.\r
+ //\r
+ NumberOfPml5EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 48) {\r
+ NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);\r
+ PhysicalAddressBits = 48;\r
+ }\r
+\r
+ NumberOfPml4EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 39) {\r
+ NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);\r
+ PhysicalAddressBits = 39;\r
+ }\r
+\r
+ NumberOfPdpEntriesNeeded = 1;\r
+ ASSERT (PhysicalAddressBits > 30);\r
+ NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);\r
+\r
+ //\r
+ // Pre-allocate big pages to avoid later allocations.\r
+ //\r
+ if (!Page1GSupport) {\r
+ TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;\r
+ } else {\r
+ TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;\r
+ }\r
+\r
+ //\r
+ // Substract the one page occupied by PML5 entries if 5-Level Paging is disabled.\r
+ //\r
+ if (!Page5LevelSupport) {\r
+ TotalPagesNum--;\r
+ }\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",\r
+ NumberOfPml5EntriesNeeded,\r
+ NumberOfPml4EntriesNeeded,\r
+ NumberOfPdpEntriesNeeded,\r
+ (UINT64)TotalPagesNum\r
+ ));\r
+\r
+ BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);\r
+ if (BigPageAddress == 0) {\r
+ ASSERT (FALSE);\r
+ return 0;\r
+ }\r
+\r
+ DEBUG ((DEBUG_INFO, "BigPageAddress = 0x%llx\n", BigPageAddress));\r
+\r
+ //\r
+ // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
+ //\r
+ PageMap = (VOID *)BigPageAddress;\r
+ if (Page5LevelSupport) {\r
+ //\r
+ // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.\r
+ //\r
+ PageMapLevel5Entry = PageMap;\r
+ BigPageAddress += SIZE_4KB;\r
+ }\r
+\r
+ PageAddress = 0;\r
+\r
+ for ( IndexOfPml5Entries = 0\r
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded\r
+ ; IndexOfPml5Entries++)\r
+ {\r
+ //\r
+ // Each PML5 entry points to a page of PML4 entires.\r
+ // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.\r
+ // When 5-Level Paging is disabled, below allocation happens only once.\r
+ //\r
+ PageMapLevel4Entry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
+\r
+ if (Page5LevelSupport) {\r
+ //\r
+ // Make a PML5 Entry\r
+ //\r
+ PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry;\r
+ PageMapLevel5Entry->Bits.ReadWrite = 1;\r
+ PageMapLevel5Entry->Bits.Present = 1;\r
+ PageMapLevel5Entry++;\r
+ }\r
+\r
+ for ( IndexOfPml4Entries = 0\r
+ ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)\r
+ ; IndexOfPml4Entries++, PageMapLevel4Entry++)\r
+ {\r
+ //\r
+ // Each PML4 entry points to a page of Page Directory Pointer entires.\r
+ // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.\r
+ //\r
+ PageDirectoryPointerEntry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
+\r
+ //\r
+ // Make a PML4 Entry\r
+ //\r
+ PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;\r
+ PageMapLevel4Entry->Bits.ReadWrite = 1;\r
+ PageMapLevel4Entry->Bits.Present = 1;\r
+\r
+ if (Page1GSupport) {\r
+ PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {\r
+ Split1GPageTo2M (\r
+ PageAddress,\r
+ (UINT64 *)PageDirectory1GEntry,\r
+ StackBase,\r
+ StackSize\r
+ );\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectory1GEntry->Uint64 = (UINT64)PageAddress;\r
+ PageDirectory1GEntry->Bits.ReadWrite = 1;\r
+ PageDirectory1GEntry->Bits.Present = 1;\r
+ PageDirectory1GEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ } else {\r
+ for ( IndexOfPdpEntries = 0\r
+ ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)\r
+ ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)\r
+ {\r
+ //\r
+ // Each Directory Pointer entries points to a page of Page Directory entires.\r
+ // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
+ //\r
+ PageDirectoryEntry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
+\r
+ //\r
+ // Fill in a Page Directory Pointer Entries\r
+ //\r
+ PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;\r
+ PageDirectoryPointerEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryPointerEntry->Bits.Present = 1;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {\r
+ //\r
+ // Need to split this 2M page that covers NULL or stack range.\r
+ //\r
+ Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64)PageAddress;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ }\r
+\r
+ //\r
+ // Fill with null entry for unused PDPTE\r
+ //\r
+ ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
+ }\r
+ }\r
+\r
+ //\r
+ // For the PML4 entries we are not using fill in a null entry.\r
+ //\r
+ ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
+ }\r
+\r
+ if (Page5LevelSupport) {\r
+ //\r
+ // For the PML5 entries we are not using fill in a null entry.\r
+ //\r
+ ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
+ }\r
+\r
+ //\r
+ // Protect the page table by marking the memory used for page table to be\r
+ // read-only.\r
+ //\r
+ EnablePageTableProtection ((UINTN)PageMap, TRUE);\r
+\r
+ return (UINTN)PageMap;\r
+}\r
#\r
PlatformInitLib|Include/Library/PlatformInitLib.h\r
\r
+ ## @libraryclass PeilessStartupLib\r
+ #\r
+ PeilessStartupLib|Include/Library/PeilessStartupLib.h\r
+\r
[Guids]\r
gUefiOvmfPkgTokenSpaceGuid = {0x93bb96af, 0xb9f2, 0x4eb8, {0x94, 0x62, 0xe0, 0xba, 0x74, 0x56, 0x42, 0x36}}\r
gEfiXenInfoGuid = {0xd3b46f3b, 0xd441, 0x1244, {0x9a, 0x12, 0x0, 0x12, 0x27, 0x3f, 0xc1, 0x4d}}\r