X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=MdeModulePkg%2FCore%2FDxeIplPeim%2FX64%2FVirtualMemory.c;h=b40b7e0c9813b06459f7008c25919ab778a64d18;hb=073f2cede820782e37e31bd6664aa53b79bbade4;hp=29b6205e884d8a58aa583ba3d89c965ecd731284;hpb=382aeac2c83dc7fb0c88b7a5ade1038820b3beab;p=mirror_edk2.git
diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
index 29b6205e88..b40b7e0c98 100644
--- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
+++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
@@ -1,9 +1,9 @@
/** @file
- x64 Virtual Memory Management Services in the form of an IA-32 driver.
+ x64 Virtual Memory Management Services in the form of an IA-32 driver.
Used to establish a 1:1 Virtual to Physical Mapping that is required to
enter Long Mode (x64 64-bit mode).
- While we make a 1:1 mapping (identity mapping) for all physical pages
+ While we make a 1:1 mapping (identity mapping) for all physical pages
we still need to use the MTRRs to ensure that the cacheability attributes
for all memory regions are correct.
@@ -15,22 +15,22 @@
2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
-Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.
+Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.
Copyright (c) 2017, AMD Incorporated. All rights reserved.
-This program and the accompanying materials
-are licensed and made available under the terms and conditions of the BSD License
-which accompanies this distribution. The full text of the license may be found at
-http://opensource.org/licenses/bsd-license.php
+SPDX-License-Identifier: BSD-2-Clause-Patent
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-**/
+**/
+#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"
+//
+// Global variable to keep track of the currently available memory used for page tables.
+//
+PAGE_TABLE_POOL *mPageTablePool = NULL;
+
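+//
+// Note: PAGE_TABLE_POOL and its sizing constants are defined in
+// VirtualMemory.h (not shown in this diff). A sketch of the bookkeeping,
+// assuming the upstream definitions:
+//
+//   #define PAGE_TABLE_POOL_ALIGNMENT   BASE_2MB
+//   #define PAGE_TABLE_POOL_UNIT_SIZE   SIZE_2MB
+//   #define PAGE_TABLE_POOL_UNIT_PAGES  EFI_SIZE_TO_PAGES (PAGE_TABLE_POOL_UNIT_SIZE)
+//   #define PAGE_TABLE_POOL_ALIGN_MASK  (~(EFI_PHYSICAL_ADDRESS)(PAGE_TABLE_POOL_ALIGNMENT - 1))
+//
+//   typedef struct {
+//     VOID   *NextPool;   // circular list of pool units
+//     UINTN  Offset;      // next free byte offset within this unit
+//     UINTN  FreePages;   // pages still available in this unit
+//   } PAGE_TABLE_POOL;
+//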
/**
Clear legacy memory located at the first 4K-page, if available.
@@ -101,6 +101,62 @@ IsNullDetectionEnabled (
return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}
+/**
+ The function will check if Execute Disable Bit is available.
+
+ @retval TRUE Execute Disable Bit is available.
+ @retval FALSE Execute Disable Bit is not available.
+
+**/
+BOOLEAN
+IsExecuteDisableBitAvailable (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEdx;
+ BOOLEAN Available;
+
+ Available = FALSE;
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000001) {
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & BIT20) != 0) {
+ //
+ // Bit 20: Execute Disable Bit available.
+ //
+ Available = TRUE;
+ }
+ }
+
+ return Available;
+}
+
+/**
+ Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.
+
+ @retval TRUE IA32_EFER.NXE should be enabled.
+ @retval FALSE IA32_EFER.NXE should not be enabled.
+
+**/
+BOOLEAN
+IsEnableNonExecNeeded (
+ VOID
+ )
+{
+ if (!IsExecuteDisableBitAvailable ()) {
+ return FALSE;
+ }
+
+ //
+ // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.
+ // Features controlled by the following PCDs need this feature to be enabled.
+ //
+ return (PcdGetBool (PcdSetNxForStack) ||
+ PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
+ PcdGet32 (PcdImageProtectionPolicy) != 0);
+}
+
/**
Enable Execute Disable Bit.
@@ -117,6 +173,148 @@ EnableExecuteDisableBit (
AsmWriteMsr64 (0xC0000080, MsrRegisters);
}
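+//
+// For reference (the body is elided by the hunk above): EnableExecuteDisableBit
+// is essentially a read-modify-write of the IA32_EFER MSR (0xC0000080) that
+// sets the NXE bit, roughly:
+//
+//   MsrRegisters  = AsmReadMsr64 (0xC0000080);
+//   MsrRegisters |= BIT11;                      // IA32_EFER.NXE
+//   AsmWriteMsr64 (0xC0000080, MsrRegisters);
+//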
+/**
+ The function will check if the page table entry should be split to a smaller
+ granularity.
+
+ @param Address Physical memory address.
+ @param Size Size of the given physical memory.
+ @param StackBase Base address of stack.
+ @param StackSize Size of stack.
+
+ @retval TRUE Page table should be split.
+ @retval FALSE Page table should not be split.
+**/
+BOOLEAN
+ToSplitPageTable (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN Size,
+ IN EFI_PHYSICAL_ADDRESS StackBase,
+ IN UINTN StackSize
+ )
+{
+ if (IsNullDetectionEnabled () && Address == 0) {
+ return TRUE;
+ }
+
+ if (PcdGetBool (PcdCpuStackGuard)) {
+ if (StackBase >= Address && StackBase < (Address + Size)) {
+ return TRUE;
+ }
+ }
+
+ if (PcdGetBool (PcdSetNxForStack)) {
+ if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
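+//
+// Worked example (hypothetical values): with StackBase = 0x7FF000 and
+// StackSize = 0x4000 the stack spans [0x7FF000, 0x803000) and straddles a
+// 2MB boundary; assuming PcdSetNxForStack is TRUE:
+//
+//   ToSplitPageTable (0x400000, SIZE_2MB, 0x7FF000, 0x4000);  // FALSE: no overlap
+//   ToSplitPageTable (0x600000, SIZE_2MB, 0x7FF000, 0x4000);  // TRUE: covers stack start
+//   ToSplitPageTable (0x800000, SIZE_2MB, 0x7FF000, 0x4000);  // TRUE: covers stack end
+//   ToSplitPageTable (0,        SIZE_2MB, 0x7FF000, 0x4000);  // TRUE if NULL detection enabled
+//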
+/**
+ Initialize a buffer pool for page table use only.
+
+ To reduce the potential for split operations on the page table, the pages
+ reserved for page tables are allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES
+ and aligned to PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
+ initialized with a number of pages greater than or equal to the given PoolPages.
+
+ Once the pages in the pool are used up, this method should be called again to
+ reserve at least another PAGE_TABLE_POOL_UNIT_PAGES pages. Usually this won't
+ happen in practice.
+
+ @param PoolPages The minimum number of pages the pool must contain.
+
+ @retval TRUE The pool is initialized successfully.
+ @retval FALSE The pool could not be created because memory is exhausted.
+**/
+BOOLEAN
+InitializePageTablePool (
+ IN UINTN PoolPages
+ )
+{
+ VOID *Buffer;
+
+ //
+ // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
+ // header.
+ //
+ PoolPages += 1; // Add one page for header.
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
+ PAGE_TABLE_POOL_UNIT_PAGES;
+ Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
+ if (Buffer == NULL) {
+ DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
+ return FALSE;
+ }
+
+ //
+ // Link all pools into a list for easier tracking later.
+ //
+ if (mPageTablePool == NULL) {
+ mPageTablePool = Buffer;
+ mPageTablePool->NextPool = mPageTablePool;
+ } else {
+ ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
+ mPageTablePool->NextPool = Buffer;
+ mPageTablePool = Buffer;
+ }
+
+ //
+ // Reserve one page for pool header.
+ //
+ mPageTablePool->FreePages = PoolPages - 1;
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
+
+ return TRUE;
+}
+
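+//
+// Sizing example for the rounding above (assuming PAGE_TABLE_POOL_UNIT_PAGES
+// is 512, i.e. one 2MB unit):
+//
+//   PoolPages = 1   -> +1 header page = 2   -> rounded up to 512
+//   PoolPages = 511 -> +1 header page = 512 -> rounded up to 512
+//   PoolPages = 512 -> +1 header page = 513 -> rounded up to 1024
+//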
+/**
+ This API provides a way to allocate memory for page tables.
+
+ This API can be called more than once to allocate memory for page tables.
+
+ Allocates the number of 4KB pages and returns a pointer to the allocated
+ buffer. The buffer returned is aligned on a 4KB boundary.
+
+ If Pages is 0, then NULL is returned.
+ If there is not enough memory remaining to satisfy the request, then NULL is
+ returned.
+
+ @param Pages The number of 4 KB pages to allocate.
+
+ @return A pointer to the allocated buffer or NULL if allocation fails.
+
+**/
+VOID *
+AllocatePageTableMemory (
+ IN UINTN Pages
+ )
+{
+ VOID *Buffer;
+
+ if (Pages == 0) {
+ return NULL;
+ }
+
+ //
+ // Renew the pool if necessary.
+ //
+ if (mPageTablePool == NULL ||
+ Pages > mPageTablePool->FreePages) {
+ if (!InitializePageTablePool (Pages)) {
+ return NULL;
+ }
+ }
+
+ Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
+
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
+ mPageTablePool->FreePages -= Pages;
+
+ return Buffer;
+}
+
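+//
+// Usage sketch: callers use AllocatePageTableMemory exactly like
+// AllocatePages, but the returned pages come from the pool that is later
+// marked read-only, e.g. (see Split2MPageTo4K below):
+//
+//   PageTableEntry = AllocatePageTableMemory (1);   // one 4KB page table page
+//   ASSERT (PageTableEntry != NULL);
+//   ... fill in all 512 entries ...
+//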
/**
Split 2M page to 4K.
@@ -144,7 +342,7 @@ Split2MPageTo4K (
//
AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
- PageTableEntry = AllocatePages (1);
+ PageTableEntry = AllocatePageTableMemory (1);
ASSERT (PageTableEntry != NULL);
//
@@ -160,7 +358,8 @@ Split2MPageTo4K (
PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
PageTableEntry->Bits.ReadWrite = 1;
- if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {
+ if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
+ (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
PageTableEntry->Bits.Present = 0;
} else {
PageTableEntry->Bits.Present = 1;
@@ -204,7 +403,7 @@ Split1GPageTo2M (
//
AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
- PageDirectoryEntry = AllocatePages (1);
+ PageDirectoryEntry = AllocatePageTableMemory (1);
ASSERT (PageDirectoryEntry != NULL);
//
@@ -214,10 +413,7 @@ Split1GPageTo2M (
PhysicalAddress2M = PhysicalAddress;
for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
- if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)
- || (PcdGetBool (PcdSetNxForStack)
- && (PhysicalAddress2M < StackBase + StackSize)
- && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {
+ if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {
//
// Need to split this 2M page that covers NULL or stack range.
//
@@ -234,6 +430,186 @@ Split1GPageTo2M (
}
}
+/**
+ Set one page of page table pool memory to be read-only.
+
+ @param[in] PageTableBase Base address of page table (CR3).
+ @param[in] Address Start address of a page to be set as read-only.
+ @param[in] Level4Paging Level 4 paging flag.
+
+**/
+VOID
+SetPageTablePoolReadOnly (
+ IN UINTN PageTableBase,
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN Level4Paging
+ )
+{
+ UINTN Index;
+ UINTN EntryIndex;
+ UINT64 AddressEncMask;
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;
+ UINT64 *PageTable;
+ UINT64 *NewPageTable;
+ UINT64 PageAttr;
+ UINT64 LevelSize[5];
+ UINT64 LevelMask[5];
+ UINTN LevelShift[5];
+ UINTN Level;
+ UINT64 PoolUnitSize;
+
+ ASSERT (PageTableBase != 0);
+
+ //
+ // Since the page table is always allocated from the page table pool, which
+ // is always placed at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to
+ // set the whole pool unit to be read-only.
+ //
+ Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
+
+ LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
+ LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
+ LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
+ LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
+
+ LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
+ LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
+ LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
+ LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
+
+ LevelSize[1] = SIZE_4KB;
+ LevelSize[2] = SIZE_2MB;
+ LevelSize[3] = SIZE_1GB;
+ LevelSize[4] = SIZE_512GB;
+
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
+ PAGING_1G_ADDRESS_MASK_64;
+ PageTable = (UINT64 *)(UINTN)PageTableBase;
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
+
+ for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
+ Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
+ Index &= PAGING_PAE_INDEX_MASK;
+
+ PageAttr = PageTable[Index];
+ if ((PageAttr & IA32_PG_PS) == 0) {
+ //
+ // Go to next level of table.
+ //
+ PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
+ PAGING_4K_ADDRESS_MASK_64);
+ continue;
+ }
+
+ if (PoolUnitSize >= LevelSize[Level]) {
+ //
+ // Clear R/W bit if current page granularity is not larger than pool unit
+ // size.
+ //
+ if ((PageAttr & IA32_PG_RW) != 0) {
+ while (PoolUnitSize > 0) {
+ //
+ // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
+ // one page directory (2MB), so we never need to update attributes for
+ // pages crossing a page directory boundary. The ASSERT below guards that.
+ //
+ ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
+
+ PageTable[Index] &= ~(UINT64)IA32_PG_RW;
+ PoolUnitSize -= LevelSize[Level];
+
+ ++Index;
+ }
+ }
+
+ break;
+
+ } else {
+ //
+ // A smaller page granularity is needed; split the large page.
+ //
+ ASSERT (Level > 1);
+
+ NewPageTable = AllocatePageTableMemory (1);
+ ASSERT (NewPageTable != NULL);
+
+ PhysicalAddress = PageAttr & LevelMask[Level];
+ for (EntryIndex = 0;
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
+ ++EntryIndex) {
+ NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
+ IA32_PG_P | IA32_PG_RW;
+ if (Level > 2) {
+ NewPageTable[EntryIndex] |= IA32_PG_PS;
+ }
+ PhysicalAddress += LevelSize[Level - 1];
+ }
+
+ PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
+ IA32_PG_P | IA32_PG_RW;
+ PageTable = NewPageTable;
+ }
+ }
+}
+
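+//
+// Index math illustration (hypothetical address): for Address = 0x40200000
+// (1GB + 2MB) under 4-level paging, with PAGING_PAE_INDEX_MASK == 0x1FF:
+//
+//   Level 4 index = (0x40200000 >> 39) & 0x1FF = 0
+//   Level 3 index = (0x40200000 >> 30) & 0x1FF = 1
+//   Level 2 index = (0x40200000 >> 21) & 0x1FF = 1
+//
+// i.e. each level consumes 9 bits of the address, one 512-entry table each.
+//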
+/**
+ Prevent the memory pages used for page tables from being overwritten.
+
+ @param[in] PageTableBase Base address of page table (CR3).
+ @param[in] Level4Paging Level 4 paging flag.
+
+**/
+VOID
+EnablePageTableProtection (
+ IN UINTN PageTableBase,
+ IN BOOLEAN Level4Paging
+ )
+{
+ PAGE_TABLE_POOL *HeadPool;
+ PAGE_TABLE_POOL *Pool;
+ UINT64 PoolSize;
+ EFI_PHYSICAL_ADDRESS Address;
+
+ if (mPageTablePool == NULL) {
+ return;
+ }
+
+ //
+ // Disable CR0.WP, because we are about to mark the page table itself as
+ // read-only.
+ //
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ //
+ // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
+ // remember the original one in advance.
+ //
+ HeadPool = mPageTablePool;
+ Pool = HeadPool;
+ do {
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
+ PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
+
+ //
+ // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
+ // which matches one of the processor's page sizes (2MB by default). Apply
+ // the protection to the units one by one.
+ //
+ while (PoolSize > 0) {
+ SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
+ }
+
+ Pool = Pool->NextPool;
+ } while (Pool != HeadPool);
+
+ //
+ // Re-enable write protection after the page table attributes are updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+}
+
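+//
+// The CR0.WP toggling above matters because, with CR0.WP set, supervisor-mode
+// writes to read-only pages fault as well. The pattern is simply (CR0_WP is
+// BIT16, defined in VirtualMemory.h):
+//
+//   AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);     // open the write window
+//   ... clear IA32_PG_RW bits in the PTEs ...
+//   AsmWriteCr0 (AsmReadCr0 () | CR0_WP);      // re-arm write protection
+//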
/**
Allocates and fills in the Page Directory and Page Table Entries to
establish a 1:1 Virtual to Physical mapping.
@@ -249,16 +625,20 @@ CreateIdentityMappingPageTables (
IN EFI_PHYSICAL_ADDRESS StackBase,
IN UINTN StackSize
)
-{
+{
UINT32 RegEax;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
UINT32 RegEdx;
UINT8 PhysicalAddressBits;
EFI_PHYSICAL_ADDRESS PageAddress;
+ UINTN IndexOfPml5Entries;
UINTN IndexOfPml4Entries;
UINTN IndexOfPdpEntries;
UINTN IndexOfPageDirectoryEntries;
+ UINT32 NumberOfPml5EntriesNeeded;
UINT32 NumberOfPml4EntriesNeeded;
UINT32 NumberOfPdpEntriesNeeded;
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;
PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
@@ -266,9 +646,11 @@ CreateIdentityMappingPageTables (
UINTN TotalPagesNum;
UINTN BigPageAddress;
VOID *Hob;
+ BOOLEAN Page5LevelSupport;
BOOLEAN Page1GSupport;
PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
UINT64 AddressEncMask;
+ IA32_CR4 Cr4;
//
// Make sure AddressEncMask is contained to smallest supported address field
@@ -302,135 +684,206 @@ CreateIdentityMappingPageTables (
}
}
+ Page5LevelSupport = FALSE;
+ if (PcdGetBool (PcdUse5LevelPageTable)) {
+ AsmCpuidEx (
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL,
+ &EcxFlags.Uint32, NULL, NULL
+ );
+ if (EcxFlags.Bits.FiveLevelPage != 0) {
+ Page5LevelSupport = TRUE;
+ }
+ }
+
+ DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));
+
//
- // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
+ // when 5-Level Paging is disabled, either because it is unsupported by the
+ // hardware or because it is disabled by the PCD.
//
ASSERT (PhysicalAddressBits <= 52);
- if (PhysicalAddressBits > 48) {
+ if (!Page5LevelSupport && PhysicalAddressBits > 48) {
PhysicalAddressBits = 48;
}
//
// Calculate the table entries needed.
//
- if (PhysicalAddressBits <= 39 ) {
- NumberOfPml4EntriesNeeded = 1;
- NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
- } else {
- NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
- NumberOfPdpEntriesNeeded = 512;
+ NumberOfPml5EntriesNeeded = 1;
+ if (PhysicalAddressBits > 48) {
+ NumberOfPml5EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 48);
+ PhysicalAddressBits = 48;
}
+ NumberOfPml4EntriesNeeded = 1;
+ if (PhysicalAddressBits > 39) {
+ NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 39);
+ PhysicalAddressBits = 39;
+ }
+
+ NumberOfPdpEntriesNeeded = 1;
+ ASSERT (PhysicalAddressBits > 30);
+ NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 30);
+
//
- // Pre-allocate big pages to avoid later allocations.
+ // Pre-allocate big pages to avoid later allocations.
//
if (!Page1GSupport) {
- TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
+ TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
} else {
- TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
+ TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
}
- BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
+
+ //
+ // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
+ //
+ if (!Page5LevelSupport) {
+ TotalPagesNum--;
+ }
+
+ DEBUG ((DEBUG_INFO, "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
+ NumberOfPml5EntriesNeeded, NumberOfPml4EntriesNeeded,
+ NumberOfPdpEntriesNeeded, (UINT64)TotalPagesNum));
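+ //
+ // Worked example (illustrative): PhysicalAddressBits == 48, no 1GB pages,
+ // 4-level paging:
+ //   Pml5 = 1, Pml4 = 2^(48-39) = 512, Pdp = 2^(39-30) = 512
+ //   TotalPagesNum = ((512 + 1) * 512 + 1) * 1 + 1 = 262658, minus the unused
+ //   PML5 page = 262657 4KB pages (~1GB) to map the full 256TB space.
+ //   With 1GB pages instead: (512 + 1) * 1 + 1 - 1 = 513 pages.
+ //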
+
+ BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
ASSERT (BigPageAddress != 0);
//
// By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
//
PageMap = (VOID *) BigPageAddress;
- BigPageAddress += SIZE_4KB;
-
- PageMapLevel4Entry = PageMap;
- PageAddress = 0;
- for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
+ if (Page5LevelSupport) {
//
- // Each PML4 entry points to a page of Page Directory Pointer entires.
- // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
+ // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
//
- PageDirectoryPointerEntry = (VOID *) BigPageAddress;
- BigPageAddress += SIZE_4KB;
+ PageMapLevel5Entry = PageMap;
+ BigPageAddress += SIZE_4KB;
+ }
+ PageAddress = 0;
+ for ( IndexOfPml5Entries = 0
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
+ ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
//
- // Make a PML4 Entry
+ // Each PML5 entry points to a page of PML4 entries.
+ // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
+ // When 5-Level Paging is disabled, the allocation below happens only once.
//
- PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
- PageMapLevel4Entry->Bits.ReadWrite = 1;
- PageMapLevel4Entry->Bits.Present = 1;
-
- if (Page1GSupport) {
- PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;
-
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
- if ((IsNullDetectionEnabled () && PageAddress == 0)
- || (PcdGetBool (PcdSetNxForStack)
- && (PageAddress < StackBase + StackSize)
- && ((PageAddress + SIZE_1GB) > StackBase))) {
- Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
- } else {
- //
- // Fill in the Page Directory entries
- //
- PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
- PageDirectory1GEntry->Bits.ReadWrite = 1;
- PageDirectory1GEntry->Bits.Present = 1;
- PageDirectory1GEntry->Bits.MustBe1 = 1;
- }
- }
- } else {
- for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
- //
- // Each Directory Pointer entries points to a page of Page Directory entires.
- // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
- //
- PageDirectoryEntry = (VOID *) BigPageAddress;
- BigPageAddress += SIZE_4KB;
+ PageMapLevel4Entry = (VOID *) BigPageAddress;
+ BigPageAddress += SIZE_4KB;
- //
- // Fill in a Page Directory Pointer Entries
- //
- PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
- PageDirectoryPointerEntry->Bits.ReadWrite = 1;
- PageDirectoryPointerEntry->Bits.Present = 1;
-
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
- if ((IsNullDetectionEnabled () && PageAddress == 0)
- || (PcdGetBool (PcdSetNxForStack)
- && (PageAddress < StackBase + StackSize)
- && ((PageAddress + SIZE_2MB) > StackBase))) {
- //
- // Need to split this 2M page that covers NULL or stack range.
- //
- Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
+ if (Page5LevelSupport) {
+ //
+ // Make a PML5 Entry
+ //
+ PageMapLevel5Entry->Uint64 = (UINT64) (UINTN) PageMapLevel4Entry | AddressEncMask;
+ PageMapLevel5Entry->Bits.ReadWrite = 1;
+ PageMapLevel5Entry->Bits.Present = 1;
+ }
+
+ for ( IndexOfPml4Entries = 0
+ ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
+ ; IndexOfPml4Entries++, PageMapLevel4Entry++) {
+ //
+ // Each PML4 entry points to a page of Page Directory Pointer entries.
+ // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
+ //
+ PageDirectoryPointerEntry = (VOID *) BigPageAddress;
+ BigPageAddress += SIZE_4KB;
+
+ //
+ // Make a PML4 Entry
+ //
+ PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
+ PageMapLevel4Entry->Bits.ReadWrite = 1;
+ PageMapLevel4Entry->Bits.Present = 1;
+
+ if (Page1GSupport) {
+ PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;
+
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
+ if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
+ Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
} else {
//
// Fill in the Page Directory entries
//
- PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
- PageDirectoryEntry->Bits.ReadWrite = 1;
- PageDirectoryEntry->Bits.Present = 1;
- PageDirectoryEntry->Bits.MustBe1 = 1;
+ PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
+ PageDirectory1GEntry->Bits.ReadWrite = 1;
+ PageDirectory1GEntry->Bits.Present = 1;
+ PageDirectory1GEntry->Bits.MustBe1 = 1;
}
}
- }
+ } else {
+ for ( IndexOfPdpEntries = 0
+ ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
+ ; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
+ //
+ // Each Directory Pointer entry points to a page of Page Directory entries.
+ // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
+ //
+ PageDirectoryEntry = (VOID *) BigPageAddress;
+ BigPageAddress += SIZE_4KB;
- for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
- ZeroMem (
- PageDirectoryPointerEntry,
- sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
- );
+ //
+ // Fill in a Page Directory Pointer Entry
+ //
+ PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
+ PageDirectoryPointerEntry->Bits.ReadWrite = 1;
+ PageDirectoryPointerEntry->Bits.Present = 1;
+
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+ if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
+ //
+ // Need to split this 2M page that covers NULL or stack range.
+ //
+ Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
+ } else {
+ //
+ // Fill in the Page Directory entries
+ //
+ PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
+ PageDirectoryEntry->Bits.ReadWrite = 1;
+ PageDirectoryEntry->Bits.Present = 1;
+ PageDirectoryEntry->Bits.MustBe1 = 1;
+ }
+ }
+ }
+
+ //
+ // Fill unused PDPTEs with null entries.
+ //
+ ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof(PAGE_MAP_AND_DIRECTORY_POINTER));
}
}
+
+ //
+ // For the PML4 entries we are not using, fill in null entries.
+ //
+ ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
+ }
+
+ if (Page5LevelSupport) {
+ Cr4.UintN = AsmReadCr4 ();
+ Cr4.Bits.LA57 = 1;
+ AsmWriteCr4 (Cr4.UintN);
+ //
+ // For the PML5 entries we are not using, fill in null entries.
+ //
+ ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
}
//
- // For the PML4 entries we are not using fill in a null entry.
+ // Protect the page table by marking the memory used for the page tables as
+ // read-only.
//
- for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
- ZeroMem (
- PageMapLevel4Entry,
- sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
- );
- }
+ EnablePageTableProtection ((UINTN)PageMap, TRUE);
- if (PcdGetBool (PcdSetNxForStack)) {
+ //
+ // Set IA32_EFER.NXE if necessary.
+ //
+ if (IsEnableNonExecNeeded ()) {
EnableExecuteDisableBit ();
}