/** @file\r
- x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
+ x64 Virtual Memory Management Services in the form of an IA-32 driver.\r
Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
enter Long Mode (x64 64-bit mode).\r
\r
- While we make a 1:1 mapping (identity mapping) for all physical pages \r
+ While we make a 1:1 mapping (identity mapping) for all physical pages\r
we still need to use the MTRRs to ensure that the cacheability attributes\r
for all memory regions are correct.\r
\r
2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
\r
-Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>\r
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
-\r
-**/ \r
+**/\r
\r
+#include <Register/Intel/Cpuid.h>\r
#include "DxeIpl.h"\r
#include "VirtualMemory.h"\r
\r
+//\r
+// Global variable to keep track of the current available memory used as page table.\r
+//\r
+PAGE_TABLE_POOL *mPageTablePool = NULL;\r
+\r
/**\r
- Clear legacy memory located at the first 4K-page, if available.\r
+ Clear legacy memory located at the first 4K-page, if available.\r
\r
- This function traverses the whole HOB list to check if memory from 0 to 4095\r
- exists and has not been allocated, and then clear it if so.\r
+ This function traverses the whole HOB list to check if memory from 0 to 4095\r
+ exists and has not been allocated, and then clear it if so.\r
\r
- @param HoStart The start of HobList passed to DxeCore.\r
+ @param HobStart The start of HobList passed to DxeCore.\r
\r
**/\r
VOID\r
ClearFirst4KPage (\r
- IN VOID *HobStart\r
+ IN VOID *HobStart\r
)\r
{\r
- EFI_PEI_HOB_POINTERS RscHob;\r
- EFI_PEI_HOB_POINTERS MemHob;\r
- BOOLEAN DoClear;\r
+ EFI_PEI_HOB_POINTERS RscHob;\r
+ EFI_PEI_HOB_POINTERS MemHob;\r
+ BOOLEAN DoClear;\r
\r
RscHob.Raw = HobStart;\r
MemHob.Raw = HobStart;\r
- DoClear = FALSE;\r
+ DoClear = FALSE;\r
\r
//\r
  // Check if page 0 exists and is free\r
//\r
- while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
- RscHob.Raw)) != NULL) {\r
- if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&\r
- RscHob.ResourceDescriptor->PhysicalStart == 0) {\r
+ while ((RscHob.Raw = GetNextHob (\r
+ EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
+ RscHob.Raw\r
+ )) != NULL)\r
+ {\r
+ if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&\r
+ (RscHob.ResourceDescriptor->PhysicalStart == 0))\r
+ {\r
DoClear = TRUE;\r
//\r
// Make sure memory at 0-4095 has not been allocated.\r
//\r
- while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
- MemHob.Raw)) != NULL) {\r
+ while ((MemHob.Raw = GetNextHob (\r
+ EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
+ MemHob.Raw\r
+ )) != NULL)\r
+ {\r
if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r
- < EFI_PAGE_SIZE) {\r
+ < EFI_PAGE_SIZE)\r
+ {\r
DoClear = FALSE;\r
break;\r
}\r
+\r
MemHob.Raw = GET_NEXT_HOB (MemHob);\r
}\r
+\r
break;\r
}\r
+\r
RscHob.Raw = GET_NEXT_HOB (RscHob);\r
}\r
\r
  if (DoClear) {\r
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r
    SetMem (NULL, EFI_PAGE_SIZE, 0);\r
  }\r
\r
  return;\r
}\r
\r
+/**\r
+  Return the configuration status of the NULL pointer detection feature.\r
+\r
+ @return TRUE NULL pointer detection feature is enabled\r
+ @return FALSE NULL pointer detection feature is disabled\r
+\r
+**/\r
BOOLEAN\r
IsNullDetectionEnabled (\r
  VOID\r
  )\r
{\r
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);\r
}\r
\r
+/**\r
+ The function will check if Execute Disable Bit is available.\r
+\r
+ @retval TRUE Execute Disable Bit is available.\r
+ @retval FALSE Execute Disable Bit is not available.\r
+\r
+**/\r
+BOOLEAN\r
+IsExecuteDisableBitAvailable (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+ BOOLEAN Available;\r
+\r
+ Available = FALSE;\r
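+\r
+  //\r
+  // CPUID.80000000H returns the maximum supported extended leaf; the NX\r
+  // feature flag is reported in CPUID.80000001H:EDX[20].\r
+  //\r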
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000001) {\r
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT20) != 0) {\r
+ //\r
+ // Bit 20: Execute Disable Bit available.\r
+ //\r
+ Available = TRUE;\r
+ }\r
+ }\r
+\r
+ return Available;\r
+}\r
+\r
+/**\r
+ Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.\r
+\r
+ @retval TRUE IA32_EFER.NXE should be enabled.\r
+ @retval FALSE IA32_EFER.NXE should not be enabled.\r
+\r
+**/\r
+BOOLEAN\r
+IsEnableNonExecNeeded (\r
+ VOID\r
+ )\r
+{\r
+ if (!IsExecuteDisableBitAvailable ()) {\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+ // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.\r
+  // Features controlled by the following PCDs need this feature to be enabled.\r
+ //\r
+ return (PcdGetBool (PcdSetNxForStack) ||\r
+ PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||\r
+ PcdGet32 (PcdImageProtectionPolicy) != 0);\r
+}\r
+\r
/**\r
Enable Execute Disable Bit.\r
\r
**/\r
VOID\r
EnableExecuteDisableBit (\r
  VOID\r
)\r
{\r
- UINT64 MsrRegisters;\r
+ UINT64 MsrRegisters;\r
\r
- MsrRegisters = AsmReadMsr64 (0xC0000080);\r
+ MsrRegisters = AsmReadMsr64 (0xC0000080);\r
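+  //\r
+  // BIT11 of IA32_EFER (MSR 0xC0000080) is the NXE bit, which enables the XD\r
+  // bit in page table entries.\r
+  //\r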
MsrRegisters |= BIT11;\r
AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
}\r
\r
+/**\r
+  The function will check if the page table entry should be split into smaller\r
+  granularity.\r
+\r
+ @param Address Physical memory address.\r
+ @param Size Size of the given physical memory.\r
+ @param StackBase Base address of stack.\r
+ @param StackSize Size of stack.\r
+ @param GhcbBase Base address of GHCB pages.\r
+ @param GhcbSize Size of GHCB area.\r
+\r
+ @retval TRUE Page table should be split.\r
+ @retval FALSE Page table should not be split.\r
+**/\r
+BOOLEAN\r
+ToSplitPageTable (\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN UINTN Size,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize,\r
+ IN EFI_PHYSICAL_ADDRESS GhcbBase,\r
+ IN UINTN GhcbSize\r
+ )\r
+{\r
+ if (IsNullDetectionEnabled () && (Address == 0)) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (PcdGetBool (PcdCpuStackGuard)) {\r
+ if ((StackBase >= Address) && (StackBase < (Address + Size))) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
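+  //\r
+  // The remaining checks are half-open interval overlap tests:\r
+  // [Address, Address + Size) intersects [Base, Base + Len) exactly when\r
+  // Address < Base + Len and Address + Size > Base.\r
+  //\r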
+ if (PcdGetBool (PcdSetNxForStack)) {\r
+ if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ if (GhcbBase != 0) {\r
+ if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ Initialize a buffer pool for page table use only.\r
+\r
+  To reduce the potential for split operations on the page table, the pages\r
+  reserved for page tables should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES\r
+  and at a PAGE_TABLE_POOL_ALIGNMENT boundary. So the page pool is always\r
+  initialized with a number of pages greater than or equal to the given PoolPages.\r
+\r
+ Once the pages in the pool are used up, this method should be called again to\r
+ reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't\r
+ happen in practice.\r
+\r
+  @param PoolPages  The minimum number of pages the pool must provide.\r
+\r
+ @retval TRUE The pool is initialized successfully.\r
+ @retval FALSE The memory is out of resource.\r
+**/\r
+BOOLEAN\r
+InitializePageTablePool (\r
+ IN UINTN PoolPages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ //\r
+ // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r
+ // header.\r
+ //\r
+ PoolPages += 1; // Add one page for header.\r
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
+ PAGE_TABLE_POOL_UNIT_PAGES;\r
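+\r
+  //\r
+  // For illustration, assuming PAGE_TABLE_POOL_UNIT_PAGES is 512 (one 2MB\r
+  // unit of 4KB pages, per the definitions in VirtualMemory.h): a request\r
+  // for 10 pages reserves 512 pages, and a request for 512 pages reserves\r
+  // 1024, since one page of every reservation is consumed by the header.\r
+  //\r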
+ Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r
+ if (Buffer == NULL) {\r
+ DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+  // Link all pools into a list for easier tracking later.\r
+ //\r
+ if (mPageTablePool == NULL) {\r
+ mPageTablePool = Buffer;\r
+ mPageTablePool->NextPool = mPageTablePool;\r
+ } else {\r
+ ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r
+ mPageTablePool->NextPool = Buffer;\r
+ mPageTablePool = Buffer;\r
+ }\r
+\r
+ //\r
+ // Reserve one page for pool header.\r
+ //\r
+ mPageTablePool->FreePages = PoolPages - 1;\r
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
+\r
+ return TRUE;\r
+}\r
+\r
+/**\r
+ This API provides a way to allocate memory for page table.\r
+\r
+ This API can be called more than once to allocate memory for page tables.\r
+\r
+ Allocates the number of 4KB pages and returns a pointer to the allocated\r
+ buffer. The buffer returned is aligned on a 4KB boundary.\r
+\r
+ If Pages is 0, then NULL is returned.\r
+ If there is not enough memory remaining to satisfy the request, then NULL is\r
+ returned.\r
+\r
+ @param Pages The number of 4 KB pages to allocate.\r
+\r
+ @return A pointer to the allocated buffer or NULL if allocation fails.\r
+\r
+**/\r
+VOID *\r
+AllocatePageTableMemory (\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ if (Pages == 0) {\r
+ return NULL;\r
+ }\r
+\r
+ //\r
+ // Renew the pool if necessary.\r
+ //\r
+ if ((mPageTablePool == NULL) ||\r
+ (Pages > mPageTablePool->FreePages))\r
+ {\r
+ if (!InitializePageTablePool (Pages)) {\r
+ return NULL;\r
+ }\r
+ }\r
+\r
+ Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r
+\r
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
+ mPageTablePool->FreePages -= Pages;\r
+\r
+ return Buffer;\r
+}\r
+\r
/**\r
Split 2M page to 4K.\r
\r
  @param[in] PhysicalAddress Start physical address the 2M page covers.\r
  @param[in, out] PageEntry2M Pointer to 2M page entry.\r
@param[in] StackBase Stack base address.\r
@param[in] StackSize Stack size.\r
+ @param[in] GhcbBase GHCB page area base address.\r
+ @param[in] GhcbSize GHCB page area size.\r
\r
**/\r
VOID\r
Split2MPageTo4K (\r
- IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
- IN OUT UINT64 *PageEntry2M,\r
- IN EFI_PHYSICAL_ADDRESS StackBase,\r
- IN UINTN StackSize\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry2M,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize,\r
+ IN EFI_PHYSICAL_ADDRESS GhcbBase,\r
+ IN UINTN GhcbSize\r
)\r
{\r
- EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
- UINTN IndexOfPageTableEntries;\r
- PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
- UINT64 AddressEncMask;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
+ UINTN IndexOfPageTableEntries;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
+ UINT64 AddressEncMask;\r
\r
//\r
// Make sure AddressEncMask is contained to smallest supported address field\r
//\r
AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
\r
- PageTableEntry = AllocatePages (1);\r
+ PageTableEntry = AllocatePageTableMemory (1);\r
ASSERT (PageTableEntry != NULL);\r
\r
//\r
// Fill in 2M page entry.\r
//\r
- *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
+ *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
\r
PhysicalAddress4K = PhysicalAddress;\r
for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
//\r
// Fill in the Page Table entries\r
//\r
- PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
+ PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;\r
+\r
+ //\r
+ // The GHCB range consists of two pages per CPU, the GHCB and a\r
+ // per-CPU variable page. The GHCB page needs to be mapped as an\r
+ // unencrypted page while the per-CPU variable page needs to be\r
+ // mapped encrypted. These pages alternate in assignment.\r
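+    // Bit 12 of (PhysicalAddress4K - GhcbBase), tested below via the\r
+    // SIZE_4KB mask, selects between the two: even 4KB pages are GHCB\r
+    // pages and stay unencrypted, while odd pages receive the encryption\r
+    // mask.\r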
+ //\r
+ if ( (GhcbBase == 0)\r
+ || (PhysicalAddress4K < GhcbBase)\r
+ || (PhysicalAddress4K >= GhcbBase + GhcbSize)\r
+ || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))\r
+ {\r
+ PageTableEntry->Uint64 |= AddressEncMask;\r
+ }\r
+\r
PageTableEntry->Bits.ReadWrite = 1;\r
\r
- if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {\r
+ if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||\r
+ (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))\r
+ {\r
PageTableEntry->Bits.Present = 0;\r
} else {\r
PageTableEntry->Bits.Present = 1;\r
}\r
\r
- if (PcdGetBool (PcdSetNxForStack)\r
- && (PhysicalAddress4K >= StackBase)\r
- && (PhysicalAddress4K < StackBase + StackSize)) {\r
+ if ( PcdGetBool (PcdSetNxForStack)\r
+ && (PhysicalAddress4K >= StackBase)\r
+ && (PhysicalAddress4K < StackBase + StackSize))\r
+ {\r
//\r
// Set Nx bit for stack.\r
//\r
      PageTableEntry->Bits.Nx = 1;\r
    }\r
  }\r
}\r
\r
/**\r
  Split 1G page to 2M.\r
\r
  @param[in] PhysicalAddress Start physical address the 1G page covers.\r
  @param[in, out] PageEntry1G Pointer to 1G page entry.\r
@param[in] StackBase Stack base address.\r
@param[in] StackSize Stack size.\r
+ @param[in] GhcbBase GHCB page area base address.\r
+ @param[in] GhcbSize GHCB page area size.\r
\r
**/\r
VOID\r
Split1GPageTo2M (\r
- IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
- IN OUT UINT64 *PageEntry1G,\r
- IN EFI_PHYSICAL_ADDRESS StackBase,\r
- IN UINTN StackSize\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry1G,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize,\r
+ IN EFI_PHYSICAL_ADDRESS GhcbBase,\r
+ IN UINTN GhcbSize\r
)\r
{\r
- EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
- UINTN IndexOfPageDirectoryEntries;\r
- PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
- UINT64 AddressEncMask;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINT64 AddressEncMask;\r
\r
//\r
// Make sure AddressEncMask is contained to smallest supported address field\r
//\r
AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
\r
- PageDirectoryEntry = AllocatePages (1);\r
+ PageDirectoryEntry = AllocatePageTableMemory (1);\r
ASSERT (PageDirectoryEntry != NULL);\r
\r
//\r
// Fill in 1G page entry.\r
//\r
- *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
+ *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
\r
PhysicalAddress2M = PhysicalAddress;\r
for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
- if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)\r
- || (PcdGetBool (PcdSetNxForStack)\r
- && (PhysicalAddress2M < StackBase + StackSize)\r
- && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {\r
+ if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {\r
//\r
      // Need to split this 2M page that covers the NULL page, stack or GHCB range.\r
//\r
- Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
+ Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);\r
} else {\r
//\r
// Fill in the Page Directory entries\r
//\r
- PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
+ PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;\r
PageDirectoryEntry->Bits.ReadWrite = 1;\r
- PageDirectoryEntry->Bits.Present = 1;\r
- PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
}\r
}\r
}\r
\r
+/**\r
+ Set one page of page table pool memory to be read-only.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Address Start address of a page to be set as read-only.\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+SetPageTablePoolReadOnly (\r
+ IN UINTN PageTableBase,\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN EntryIndex;\r
+ UINT64 AddressEncMask;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
+ UINT64 *PageTable;\r
+ UINT64 *NewPageTable;\r
+ UINT64 PageAttr;\r
+ UINT64 LevelSize[5];\r
+ UINT64 LevelMask[5];\r
+ UINTN LevelShift[5];\r
+ UINTN Level;\r
+ UINT64 PoolUnitSize;\r
+\r
+ ASSERT (PageTableBase != 0);\r
+\r
+ //\r
+  // Since the page table is always from the page table pool, which is always\r
+  // located at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to set the\r
+  // whole pool unit to be read-only.\r
+ //\r
+ Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;\r
+\r
+ LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;\r
+ LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;\r
+ LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;\r
+ LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;\r
+\r
+ LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;\r
+ LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;\r
+ LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;\r
+ LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ LevelSize[1] = SIZE_4KB;\r
+ LevelSize[2] = SIZE_2MB;\r
+ LevelSize[3] = SIZE_1GB;\r
+ LevelSize[4] = SIZE_512GB;\r
+\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
+ PAGING_1G_ADDRESS_MASK_64;\r
+ PageTable = (UINT64 *)(UINTN)PageTableBase;\r
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;\r
+\r
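+  //\r
+  // Walk down from the top paging level toward the entry that maps Address,\r
+  // descending through non-leaf entries (IA32_PG_PS clear) into the next\r
+  // level of table.\r
+  //\r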
+ for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {\r
+ Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));\r
+ Index &= PAGING_PAE_INDEX_MASK;\r
+\r
+ PageAttr = PageTable[Index];\r
+ if ((PageAttr & IA32_PG_PS) == 0) {\r
+ //\r
+ // Go to next level of table.\r
+ //\r
+ PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &\r
+ PAGING_4K_ADDRESS_MASK_64);\r
+ continue;\r
+ }\r
+\r
+ if (PoolUnitSize >= LevelSize[Level]) {\r
+ //\r
+ // Clear R/W bit if current page granularity is not larger than pool unit\r
+ // size.\r
+ //\r
+ if ((PageAttr & IA32_PG_RW) != 0) {\r
+ while (PoolUnitSize > 0) {\r
+ //\r
+          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within\r
+          // one page directory (2MB), so we never need to update attributes\r
+          // for pages crossing a page directory. The ASSERT below checks that.\r
+ //\r
+ ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));\r
+\r
+ PageTable[Index] &= ~(UINT64)IA32_PG_RW;\r
+ PoolUnitSize -= LevelSize[Level];\r
+\r
+ ++Index;\r
+ }\r
+ }\r
+\r
+ break;\r
+ } else {\r
+ //\r
+      // A smaller page granularity is needed; split this entry.\r
+ //\r
+ ASSERT (Level > 1);\r
+\r
+ NewPageTable = AllocatePageTableMemory (1);\r
+ ASSERT (NewPageTable != NULL);\r
+\r
+ PhysicalAddress = PageAttr & LevelMask[Level];\r
+ for (EntryIndex = 0;\r
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);\r
+ ++EntryIndex)\r
+ {\r
+ NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ if (Level > 2) {\r
+ NewPageTable[EntryIndex] |= IA32_PG_PS;\r
+ }\r
+\r
+ PhysicalAddress += LevelSize[Level - 1];\r
+ }\r
+\r
+ PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ PageTable = NewPageTable;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+  Prevent the memory pages used for the page table from being overwritten.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+EnablePageTableProtection (\r
+ IN UINTN PageTableBase,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ PAGE_TABLE_POOL *HeadPool;\r
+ PAGE_TABLE_POOL *Pool;\r
+ UINT64 PoolSize;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+\r
+ if (mPageTablePool == NULL) {\r
+ return;\r
+ }\r
+\r
+ //\r
+  // Disable CR0.WP so the page table remains writable while we mark the\r
+  // memory backing it as read-only.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);\r
+\r
+ //\r
+ // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to\r
+ // remember original one in advance.\r
+ //\r
+ HeadPool = mPageTablePool;\r
+ Pool = HeadPool;\r
+ do {\r
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;\r
+ PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);\r
+\r
+ //\r
+    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,\r
+    // which matches one of the processor's page sizes (2MB by default). Apply\r
+    // the protection to the units one by one.\r
+ //\r
+ while (PoolSize > 0) {\r
+ SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);\r
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;\r
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r
+ }\r
+\r
+ Pool = Pool->NextPool;\r
+ } while (Pool != HeadPool);\r
+\r
+ //\r
+  // Re-enable write protection now that the page table attributes are updated.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () | CR0_WP);\r
+}\r
+\r
/**\r
Allocates and fills in the Page Directory and Page Table Entries to\r
establish a 1:1 Virtual to Physical mapping.\r
\r
@param[in] StackBase Stack base address.\r
@param[in] StackSize Stack size.\r
+ @param[in] GhcbBase GHCB base address.\r
+ @param[in] GhcbSize GHCB size.\r
\r
@return The address of 4 level page map.\r
\r
**/\r
UINTN\r
CreateIdentityMappingPageTables (\r
- IN EFI_PHYSICAL_ADDRESS StackBase,\r
- IN UINTN StackSize\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize,\r
+ IN EFI_PHYSICAL_ADDRESS GhcbBase,\r
+ IN UINTN GhcbSize\r
)\r
-{ \r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINT8 PhysicalAddressBits;\r
- EFI_PHYSICAL_ADDRESS PageAddress;\r
- UINTN IndexOfPml4Entries;\r
- UINTN IndexOfPdpEntries;\r
- UINTN IndexOfPageDirectoryEntries;\r
- UINT32 NumberOfPml4EntriesNeeded;\r
- UINT32 NumberOfPdpEntriesNeeded;\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
- PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
- UINTN TotalPagesNum;\r
- UINTN BigPageAddress;\r
- VOID *Hob;\r
- BOOLEAN Page1GSupport;\r
- PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
- UINT64 AddressEncMask;\r
+{\r
+ UINT32 RegEax;\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;\r
+ UINT32 RegEdx;\r
+ UINT8 PhysicalAddressBits;\r
+ EFI_PHYSICAL_ADDRESS PageAddress;\r
+ UINTN IndexOfPml5Entries;\r
+ UINTN IndexOfPml4Entries;\r
+ UINTN IndexOfPdpEntries;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ UINT32 NumberOfPml5EntriesNeeded;\r
+ UINT32 NumberOfPml4EntriesNeeded;\r
+ UINT32 NumberOfPdpEntriesNeeded;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINTN TotalPagesNum;\r
+ UINTN BigPageAddress;\r
+ VOID *Hob;\r
+ BOOLEAN Page5LevelSupport;\r
+ BOOLEAN Page1GSupport;\r
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
+ UINT64 AddressEncMask;\r
+ IA32_CR4 Cr4;\r
+\r
+ //\r
+ // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings\r
+ //\r
+ PageMapLevel5Entry = NULL;\r
\r
//\r
// Make sure AddressEncMask is contained to smallest supported address field\r
  //\r
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
\r
Page1GSupport = FALSE;\r
- if (PcdGetBool(PcdUse1GPageTable)) {\r
+ if (PcdGetBool (PcdUse1GPageTable)) {\r
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
if (RegEax >= 0x80000001) {\r
AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
      if ((RegEdx & BIT26) != 0) {\r
        Page1GSupport = TRUE;\r
      }\r
    }\r
  }\r
\r
  //\r
  // Get physical address bits supported.\r
  //\r
Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
if (Hob != NULL) {\r
- PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
+ PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;\r
} else {\r
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
if (RegEax >= 0x80000008) {\r
AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
- PhysicalAddressBits = (UINT8) RegEax;\r
+ PhysicalAddressBits = (UINT8)RegEax;\r
} else {\r
PhysicalAddressBits = 36;\r
}\r
}\r
\r
+ Page5LevelSupport = FALSE;\r
+ if (PcdGetBool (PcdUse5LevelPageTable)) {\r
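+    //\r
+    // CPUID leaf 07h, sub-leaf 0: ECX bit 16 reports 5-level paging (LA57)\r
+    // support.\r
+    //\r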
+ AsmCpuidEx (\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,\r
+ NULL,\r
+ &EcxFlags.Uint32,\r
+ NULL,\r
+ NULL\r
+ );\r
+ if (EcxFlags.Bits.FiveLevelPage != 0) {\r
+ Page5LevelSupport = TRUE;\r
+ }\r
+ }\r
+\r
+ DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));\r
+\r
//\r
- // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.\r
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses\r
+  // when 5-Level Paging is disabled, either because the HW does not support\r
+  // it or because it is disabled by PCD.\r
//\r
ASSERT (PhysicalAddressBits <= 52);\r
- if (PhysicalAddressBits > 48) {\r
+ if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {\r
PhysicalAddressBits = 48;\r
}\r
\r
//\r
// Calculate the table entries needed.\r
//\r
- if (PhysicalAddressBits <= 39 ) {\r
- NumberOfPml4EntriesNeeded = 1;\r
- NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));\r
- } else {\r
- NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));\r
- NumberOfPdpEntriesNeeded = 512;\r
+ NumberOfPml5EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 48) {\r
+ NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);\r
+ PhysicalAddressBits = 48;\r
+ }\r
+\r
+ NumberOfPml4EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 39) {\r
+ NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);\r
+ PhysicalAddressBits = 39;\r
}\r
\r
+ NumberOfPdpEntriesNeeded = 1;\r
+ ASSERT (PhysicalAddressBits > 30);\r
+ NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);\r
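+  //\r
+  // For illustration: 48 address bits yield 1 PML5 entry, 2^(48-39) == 512\r
+  // PML4 entries, and 512 PDP entries per PML4 entry; 36 address bits yield\r
+  // 1 PML5, 1 PML4, and 2^(36-30) == 64 PDP entries.\r
+  //\r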
+\r
//\r
- // Pre-allocate big pages to avoid later allocations. \r
+ // Pre-allocate big pages to avoid later allocations.\r
//\r
if (!Page1GSupport) {\r
- TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;\r
+ TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;\r
} else {\r
- TotalPagesNum = NumberOfPml4EntriesNeeded + 1;\r
+ TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;\r
}\r
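+\r
+  //\r
+  // Continuing the example: with 1GB pages and 48 address bits this is\r
+  // (512 + 1) * 1 + 1 == 514 pages (~2MB); with only 2MB pages it is\r
+  // ((512 + 1) * 512 + 1) * 1 + 1 == 262658 pages (~1GB), which is why 1GB\r
+  // pages are used whenever the CPU supports them.\r
+  //\r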
- BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);\r
+\r
+ //\r
+  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.\r
+ //\r
+ if (!Page5LevelSupport) {\r
+ TotalPagesNum--;\r
+ }\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",\r
+ NumberOfPml5EntriesNeeded,\r
+ NumberOfPml4EntriesNeeded,\r
+ NumberOfPdpEntriesNeeded,\r
+ (UINT64)TotalPagesNum\r
+ ));\r
+\r
+ BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);\r
ASSERT (BigPageAddress != 0);\r
\r
//\r
  // By architecture only one top-level page map (PML4, or PML5 when 5-level paging is used) exists - so let's allocate storage for it.\r
//\r
- PageMap = (VOID *) BigPageAddress;\r
- BigPageAddress += SIZE_4KB;\r
-\r
- PageMapLevel4Entry = PageMap;\r
- PageAddress = 0;\r
- for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
+ PageMap = (VOID *)BigPageAddress;\r
+ if (Page5LevelSupport) {\r
//\r
- // Each PML4 entry points to a page of Page Directory Pointer entires.\r
- // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.\r
+    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.\r
//\r
- PageDirectoryPointerEntry = (VOID *) BigPageAddress;\r
- BigPageAddress += SIZE_4KB;\r
+ PageMapLevel5Entry = PageMap;\r
+ BigPageAddress += SIZE_4KB;\r
+ }\r
+\r
+ PageAddress = 0;\r
\r
+ for ( IndexOfPml5Entries = 0\r
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded\r
+ ; IndexOfPml5Entries++)\r
+ {\r
//\r
- // Make a PML4 Entry\r
+    // Each PML5 entry points to a page of PML4 entries.\r
+    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.\r
+    // When 5-Level Paging is disabled, the allocation below happens only once.\r
//\r
- PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;\r
- PageMapLevel4Entry->Bits.ReadWrite = 1;\r
- PageMapLevel4Entry->Bits.Present = 1;\r
-\r
- if (Page1GSupport) {\r
- PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;\r
- \r
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
- if ((IsNullDetectionEnabled () && PageAddress == 0)\r
- || (PcdGetBool (PcdSetNxForStack)\r
- && (PageAddress < StackBase + StackSize)\r
- && ((PageAddress + SIZE_1GB) > StackBase))) {\r
- Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);\r
- } else {\r
- //\r
- // Fill in the Page Directory entries\r
- //\r
- PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
- PageDirectory1GEntry->Bits.ReadWrite = 1;\r
- PageDirectory1GEntry->Bits.Present = 1;\r
- PageDirectory1GEntry->Bits.MustBe1 = 1;\r
- }\r
- }\r
- } else {\r
- for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
- //\r
- // Each Directory Pointer entries points to a page of Page Directory entires.\r
- // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
- // \r
- PageDirectoryEntry = (VOID *) BigPageAddress;\r
- BigPageAddress += SIZE_4KB;\r
+ PageMapLevel4Entry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
\r
- //\r
- // Fill in a Page Directory Pointer Entries\r
- //\r
- PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;\r
- PageDirectoryPointerEntry->Bits.ReadWrite = 1;\r
- PageDirectoryPointerEntry->Bits.Present = 1;\r
-\r
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
- if ((IsNullDetectionEnabled () && PageAddress == 0)\r
- || (PcdGetBool (PcdSetNxForStack)\r
- && (PageAddress < StackBase + StackSize)\r
- && ((PageAddress + SIZE_2MB) > StackBase))) {\r
- //\r
- // Need to split this 2M page that covers NULL or stack range.\r
- //\r
- Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
+ if (Page5LevelSupport) {\r
+ //\r
+ // Make a PML5 Entry\r
+ //\r
+ PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;\r
+ PageMapLevel5Entry->Bits.ReadWrite = 1;\r
+ PageMapLevel5Entry->Bits.Present = 1;\r
+ PageMapLevel5Entry++;\r
+ }\r
+\r
+ for ( IndexOfPml4Entries = 0\r
+ ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)\r
+ ; IndexOfPml4Entries++, PageMapLevel4Entry++)\r
+ {\r
+ //\r
+      // Each PML4 entry points to a page of Page Directory Pointer entries.\r
+      // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.\r
+ //\r
+ PageDirectoryPointerEntry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
+\r
+ //\r
+ // Make a PML4 Entry\r
+ //\r
+ PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;\r
+ PageMapLevel4Entry->Bits.ReadWrite = 1;\r
+ PageMapLevel4Entry->Bits.Present = 1;\r
+\r
+ if (Page1GSupport) {\r
+ PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {\r
+ Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);\r
} else {\r
//\r
// Fill in the Page Directory entries\r
//\r
- PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
- PageDirectoryEntry->Bits.ReadWrite = 1;\r
- PageDirectoryEntry->Bits.Present = 1;\r
- PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
+ PageDirectory1GEntry->Bits.ReadWrite = 1;\r
+ PageDirectory1GEntry->Bits.Present = 1;\r
+ PageDirectory1GEntry->Bits.MustBe1 = 1;\r
}\r
}\r
- }\r
+ } else {\r
+ for ( IndexOfPdpEntries = 0\r
+ ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)\r
+ ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)\r
+ {\r
+ //\r
+          // Each Directory Pointer entry points to a page of Page Directory entries.\r
+          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.\r
+ //\r
+ PageDirectoryEntry = (VOID *)BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
\r
- for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
- ZeroMem (\r
- PageDirectoryPointerEntry,\r
- sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)\r
- );\r
+ //\r
+          // Fill in a Page Directory Pointer Entry\r
+ //\r
+ PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;\r
+ PageDirectoryPointerEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryPointerEntry->Bits.Present = 1;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {\r
+ //\r
+            // Need to split this 2M page that covers the NULL page, stack or GHCB range.\r
+ //\r
+ Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ }\r
+\r
+ //\r
+ // Fill with null entry for unused PDPTE\r
+ //\r
+ ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
}\r
}\r
+\r
+ //\r
+  // For the PML4 entries we are not using, fill in null entries.\r
+ //\r
+ ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
+ }\r
+\r
+ if (Page5LevelSupport) {\r
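+    //\r
+    // Enable 5-level paging by setting CR4.LA57 (bit 12). This can be done\r
+    // here, while still executing in 32-bit mode, because long-mode paging\r
+    // has not been activated yet.\r
+    //\r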
+ Cr4.UintN = AsmReadCr4 ();\r
+ Cr4.Bits.LA57 = 1;\r
+ AsmWriteCr4 (Cr4.UintN);\r
+ //\r
+    // For the PML5 entries we are not using, fill in null entries.\r
+ //\r
+ ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));\r
}\r
\r
//\r
- // For the PML4 entries we are not using fill in a null entry.\r
+ // Protect the page table by marking the memory used for page table to be\r
+ // read-only.\r
//\r
- for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
- ZeroMem (\r
- PageMapLevel4Entry,\r
- sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)\r
- );\r
- }\r
+ EnablePageTableProtection ((UINTN)PageMap, TRUE);\r
\r
- if (PcdGetBool (PcdSetNxForStack)) {\r
+ //\r
+ // Set IA32_EFER.NXE if necessary.\r
+ //\r
+ if (IsEnableNonExecNeeded ()) {\r
EnableExecuteDisableBit ();\r
}\r
\r
return (UINTN)PageMap;\r
}\r
-\r