/** @file\r
- x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
+ x64 Virtual Memory Management Services in the form of an IA-32 driver.\r
Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
enter Long Mode (x64 64-bit mode).\r
\r
- While we make a 1:1 mapping (identity mapping) for all physical pages \r
+ While we make a 1:1 mapping (identity mapping) for all physical pages\r
we still need to use the MTRR's to ensure that the cachability attributes\r
for all memory regions is correct.\r
\r
2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
\r
-Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
-**/ \r
+**/\r
\r
#include "DxeIpl.h"\r
#include "VirtualMemory.h"\r
\r
+//\r
+// Global variable tracking the currently available memory used as page table.\r
+//\r
+PAGE_TABLE_POOL *mPageTablePool = NULL;\r
+\r
+/**\r
+ Clear legacy memory located at the first 4K-page, if available.\r
+\r
+ This function traverses the whole HOB list to check if memory from 0 to 4095\r
+ exists and has not been allocated, and then clear it if so.\r
+\r
+ @param HobStart The start of HobList passed to DxeCore.\r
+\r
+**/\r
+VOID\r
+ClearFirst4KPage (\r
+ IN VOID *HobStart\r
+ )\r
+{\r
+ EFI_PEI_HOB_POINTERS RscHob;\r
+ EFI_PEI_HOB_POINTERS MemHob;\r
+ BOOLEAN DoClear;\r
+\r
+ RscHob.Raw = HobStart;\r
+ MemHob.Raw = HobStart;\r
+ DoClear = FALSE;\r
+\r
+ //\r
+ // Check if page 0 exists and is free\r
+ //\r
+ while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
+ RscHob.Raw)) != NULL) {\r
+ if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&\r
+ RscHob.ResourceDescriptor->PhysicalStart == 0) {\r
+ DoClear = TRUE;\r
+ //\r
+ // Make sure memory at 0-4095 has not been allocated.\r
+ //\r
+ while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
+ MemHob.Raw)) != NULL) {\r
+ if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r
+ < EFI_PAGE_SIZE) {\r
+ DoClear = FALSE;\r
+ break;\r
+ }\r
+ MemHob.Raw = GET_NEXT_HOB (MemHob);\r
+ }\r
+ break;\r
+ }\r
+ RscHob.Raw = GET_NEXT_HOB (RscHob);\r
+ }\r
+\r
+ if (DoClear) {\r
+ DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r
+ SetMem (NULL, EFI_PAGE_SIZE, 0);\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+/**\r
+ Return configuration status of the NULL pointer detection feature.\r
+\r
+ @return TRUE NULL pointer detection feature is enabled\r
+ @return FALSE NULL pointer detection feature is disabled\r
+\r
+**/\r
+BOOLEAN\r
+IsNullDetectionEnabled (\r
+ VOID\r
+ )\r
+{\r
+ return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);\r
+}\r
+\r
+/**\r
+ The function will check if the Execute Disable Bit is available.\r
+\r
+ @retval TRUE Execute Disable Bit is available.\r
+ @retval FALSE Execute Disable Bit is not available.\r
+\r
+**/\r
+BOOLEAN\r
+IsExecuteDisableBitAvailable (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+ BOOLEAN Available;\r
+\r
+ Available = FALSE;\r
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000001) {\r
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT20) != 0) {\r
+ //\r
+ // CPUID 80000001h EDX bit 20: Execute Disable Bit available.\r
+ //\r
+ Available = TRUE;\r
+ }\r
+ }\r
+\r
+ return Available;\r
+}\r
+\r
+/**\r
+ Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.\r
+\r
+ @retval TRUE IA32_EFER.NXE should be enabled.\r
+ @retval FALSE IA32_EFER.NXE should not be enabled.\r
+\r
+**/\r
+BOOLEAN\r
+IsEnableNonExecNeeded (\r
+ VOID\r
+ )\r
+{\r
+ if (!IsExecuteDisableBitAvailable ()) {\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+ // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.\r
+ // Features controlled by the following PCDs need this feature to be enabled.\r
+ //\r
+ return (PcdGetBool (PcdSetNxForStack) ||\r
+ PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||\r
+ PcdGet32 (PcdImageProtectionPolicy) != 0);\r
+}\r
+\r
+/**\r
+ Enable Execute Disable Bit (set IA32_EFER.NXE, MSR C0000080h bit 11).\r
+\r
+**/\r
+VOID\r
+EnableExecuteDisableBit (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 MsrRegisters;\r
+\r
+ MsrRegisters = AsmReadMsr64 (0xC0000080);\r
+ MsrRegisters |= BIT11;\r
+ AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
+}\r
+\r
+/**\r
+ The function will check if a page table entry should be split to smaller\r
+ granularity.\r
+\r
+ @param Address Physical memory address.\r
+ @param Size Size of the given physical memory.\r
+ @param StackBase Base address of stack.\r
+ @param StackSize Size of stack.\r
+\r
+ @retval TRUE Page table should be split.\r
+ @retval FALSE Page table should not be split.\r
+**/\r
+BOOLEAN\r
+ToSplitPageTable (\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN UINTN Size,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ if (IsNullDetectionEnabled () && Address == 0) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (PcdGetBool (PcdCpuStackGuard)) {\r
+ if (StackBase >= Address && StackBase < (Address + Size)) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ if (PcdGetBool (PcdSetNxForStack)) {\r
+ if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+/**\r
+ Initialize a buffer pool for page table use only.\r
+\r
+ To reduce the potential split operation on page table, the pages reserved for\r
+ page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and\r
+ at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always\r
+ initialized with number of pages greater than or equal to the given PoolPages.\r
+\r
+ Once the pages in the pool are used up, this method should be called again to\r
+ reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't\r
+ happen in practice.\r
+\r
+ @param PoolPages The least page number of the pool to be created.\r
+\r
+ @retval TRUE The pool is initialized successfully.\r
+ @retval FALSE The memory is out of resource.\r
+**/\r
+BOOLEAN\r
+InitializePageTablePool (\r
+ IN UINTN PoolPages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ //\r
+ // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r
+ // header.\r
+ //\r
+ PoolPages += 1; // Add one page for header.\r
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
+ PAGE_TABLE_POOL_UNIT_PAGES;\r
+ Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r
+ if (Buffer == NULL) {\r
+ DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r
+ return FALSE;\r
+ }\r
+\r
+ //\r
+ // Link all pools into a circular list for easier tracking later.\r
+ //\r
+ if (mPageTablePool == NULL) {\r
+ mPageTablePool = Buffer;\r
+ mPageTablePool->NextPool = mPageTablePool;\r
+ } else {\r
+ ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r
+ mPageTablePool->NextPool = Buffer;\r
+ mPageTablePool = Buffer;\r
+ }\r
+\r
+ //\r
+ // Reserve one page for pool header.\r
+ //\r
+ mPageTablePool->FreePages = PoolPages - 1;\r
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
+\r
+ return TRUE;\r
+}\r
+\r
+/**\r
+ This API provides a way to allocate memory for page table.\r
+\r
+ This API can be called more than once to allocate memory for page tables.\r
+\r
+ Allocates the number of 4KB pages and returns a pointer to the allocated\r
+ buffer. The buffer returned is aligned on a 4KB boundary.\r
+\r
+ If Pages is 0, then NULL is returned.\r
+ If there is not enough memory remaining to satisfy the request, then NULL is\r
+ returned.\r
+\r
+ @param Pages The number of 4 KB pages to allocate.\r
+\r
+ @return A pointer to the allocated buffer or NULL if allocation fails.\r
+\r
+**/\r
+VOID *\r
+AllocatePageTableMemory (\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ VOID *Buffer;\r
+\r
+ if (Pages == 0) {\r
+ return NULL;\r
+ }\r
+\r
+ //\r
+ // Renew the pool if there is no pool yet or not enough free pages left.\r
+ //\r
+ if (mPageTablePool == NULL ||\r
+ Pages > mPageTablePool->FreePages) {\r
+ if (!InitializePageTablePool (Pages)) {\r
+ return NULL;\r
+ }\r
+ }\r
+\r
+ Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r
+\r
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
+ mPageTablePool->FreePages -= Pages;\r
+\r
+ return Buffer;\r
+}\r
+\r
+/**\r
+ Split 2M page to 4K.\r
+\r
+ @param[in] PhysicalAddress Start physical address the 2M page covered.\r
+ @param[in, out] PageEntry2M Pointer to 2M page entry.\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+**/\r
+VOID\r
+Split2MPageTo4K (\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry2M,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
+ UINTN IndexOfPageTableEntries;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
+ UINT64 AddressEncMask;\r
+\r
+ //\r
+ // Make sure AddressEncMask is contained within the smallest supported address field\r
+ //\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ PageTableEntry = AllocatePageTableMemory (1);\r
+ ASSERT (PageTableEntry != NULL);\r
+\r
+ //\r
+ // Fill in 2M page entry.\r
+ //\r
+ *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
+\r
+ PhysicalAddress4K = PhysicalAddress;\r
+ for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
+ //\r
+ // Fill in the Page Table entries\r
+ //\r
+ PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
+ PageTableEntry->Bits.ReadWrite = 1;\r
+\r
+ if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||\r
+ (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {\r
+ PageTableEntry->Bits.Present = 0;\r
+ } else {\r
+ PageTableEntry->Bits.Present = 1;\r
+ }\r
+\r
+ if (PcdGetBool (PcdSetNxForStack)\r
+ && (PhysicalAddress4K >= StackBase)\r
+ && (PhysicalAddress4K < StackBase + StackSize)) {\r
+ //\r
+ // Set Nx bit for stack.\r
+ //\r
+ PageTableEntry->Bits.Nx = 1;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Split 1G page to 2M.\r
+\r
+ @param[in] PhysicalAddress Start physical address the 1G page covered.\r
+ @param[in, out] PageEntry1G Pointer to 1G page entry.\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+**/\r
+VOID\r
+Split1GPageTo2M (\r
+ IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry1G,\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINT64 AddressEncMask;\r
+\r
+ //\r
+ // Make sure AddressEncMask is contained within the smallest supported address field\r
+ //\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ PageDirectoryEntry = AllocatePageTableMemory (1);\r
+ ASSERT (PageDirectoryEntry != NULL);\r
+\r
+ //\r
+ // Fill in 1G page entry.\r
+ //\r
+ *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
+\r
+ PhysicalAddress2M = PhysicalAddress;\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
+ if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {\r
+ //\r
+ // Need to split this 2M page that covers NULL or stack range.\r
+ //\r
+ Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Set one page of page table pool memory to be read-only.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Address Start address of a page to be set as read-only.\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+SetPageTablePoolReadOnly (\r
+ IN UINTN PageTableBase,\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN EntryIndex;\r
+ UINT64 AddressEncMask;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
+ UINT64 *PageTable;\r
+ UINT64 *NewPageTable;\r
+ UINT64 PageAttr;\r
+ UINT64 LevelSize[5];\r
+ UINT64 LevelMask[5];\r
+ UINTN LevelShift[5];\r
+ UINTN Level;\r
+ UINT64 PoolUnitSize;\r
+\r
+ ASSERT (PageTableBase != 0);\r
+\r
+ //\r
+ // Since the page table is always from page table pool, which is always\r
+ // located at the boundary of PAGE_TABLE_POOL_ALIGNMENT, we just need to\r
+ // set the whole pool unit to be read-only.\r
+ //\r
+ Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;\r
+\r
+ LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;\r
+ LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;\r
+ LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;\r
+ LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;\r
+\r
+ LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;\r
+ LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;\r
+ LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;\r
+ LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ LevelSize[1] = SIZE_4KB;\r
+ LevelSize[2] = SIZE_2MB;\r
+ LevelSize[3] = SIZE_1GB;\r
+ LevelSize[4] = SIZE_512GB;\r
+\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
+ PAGING_1G_ADDRESS_MASK_64;\r
+ PageTable = (UINT64 *)(UINTN)PageTableBase;\r
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;\r
+\r
+ for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {\r
+ Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));\r
+ Index &= PAGING_PAE_INDEX_MASK;\r
+\r
+ PageAttr = PageTable[Index];\r
+ if ((PageAttr & IA32_PG_PS) == 0) {\r
+ //\r
+ // Go to next level of table.\r
+ //\r
+ PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &\r
+ PAGING_4K_ADDRESS_MASK_64);\r
+ continue;\r
+ }\r
+\r
+ if (PoolUnitSize >= LevelSize[Level]) {\r
+ //\r
+ // Clear R/W bit if current page granularity is not larger than pool unit\r
+ // size.\r
+ //\r
+ if ((PageAttr & IA32_PG_RW) != 0) {\r
+ while (PoolUnitSize > 0) {\r
+ //\r
+ // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in\r
+ // one page (2MB). Then we don't need to update attributes for pages\r
+ // crossing page directory. ASSERT below is for that purpose.\r
+ //\r
+ ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));\r
+\r
+ PageTable[Index] &= ~(UINT64)IA32_PG_RW;\r
+ PoolUnitSize -= LevelSize[Level];\r
+\r
+ ++Index;\r
+ }\r
+ }\r
+\r
+ break;\r
+\r
+ } else {\r
+ //\r
+ // A smaller page granularity is needed; split this large page.\r
+ //\r
+ ASSERT (Level > 1);\r
+\r
+ NewPageTable = AllocatePageTableMemory (1);\r
+ ASSERT (NewPageTable != NULL);\r
+\r
+ PhysicalAddress = PageAttr & LevelMask[Level];\r
+ for (EntryIndex = 0;\r
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);\r
+ ++EntryIndex) {\r
+ NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ if (Level > 2) {\r
+ NewPageTable[EntryIndex] |= IA32_PG_PS;\r
+ }\r
+ PhysicalAddress += LevelSize[Level - 1];\r
+ }\r
+\r
+ PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |\r
+ IA32_PG_P | IA32_PG_RW;\r
+ PageTable = NewPageTable;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Prevent the memory pages used for page table from being overwritten.\r
+\r
+ @param[in] PageTableBase Base address of page table (CR3).\r
+ @param[in] Level4Paging Level 4 paging flag.\r
+\r
+**/\r
+VOID\r
+EnablePageTableProtection (\r
+ IN UINTN PageTableBase,\r
+ IN BOOLEAN Level4Paging\r
+ )\r
+{\r
+ PAGE_TABLE_POOL *HeadPool;\r
+ PAGE_TABLE_POOL *Pool;\r
+ UINT64 PoolSize;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+\r
+ if (mPageTablePool == NULL) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Disable write protection, because we need to mark page table to be write\r
+ // protected.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);\r
+\r
+ //\r
+ // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to\r
+ // remember original one in advance.\r
+ //\r
+ HeadPool = mPageTablePool;\r
+ Pool = HeadPool;\r
+ do {\r
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;\r
+ PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);\r
+\r
+ //\r
+ // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE, which\r
+ // is one of page size of the processor (2MB by default). Let's apply the\r
+ // protection to them one by one.\r
+ //\r
+ while (PoolSize > 0) {\r
+ SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);\r
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;\r
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r
+ }\r
+\r
+ Pool = Pool->NextPool;\r
+ } while (Pool != HeadPool);\r
+\r
+ //\r
+ // Enable write protection, after page table attribute updated.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);\r
+}\r
+\r
/**\r
Allocates and fills in the Page Directory and Page Table Entries to\r
establish a 1:1 Virtual to Physical mapping.\r
\r
- @param NumberOfProcessorPhysicalAddressBits Number of processor address bits \r
- to use. Limits the number of page \r
- table entries to the physical \r
- address space. \r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
\r
@return The address of 4 level page map.\r
\r
**/\r
UINTN\r
CreateIdentityMappingPageTables (\r
- VOID\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
)\r
-{ \r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
UINT8 PhysicalAddressBits;\r
EFI_PHYSICAL_ADDRESS PageAddress;\r
UINTN IndexOfPml4Entries;\r
UINTN TotalPagesNum;\r
UINTN BigPageAddress;\r
VOID *Hob;\r
+ BOOLEAN Page1GSupport;\r
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
+ UINT64 AddressEncMask;\r
+\r
+ //\r
+ // Make sure AddressEncMask is contained to smallest supported address field\r
+ //\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ Page1GSupport = FALSE;\r
+ if (PcdGetBool(PcdUse1GPageTable)) {\r
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000001) {\r
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT26) != 0) {\r
+ Page1GSupport = TRUE;\r
+ }\r
+ }\r
+ }\r
\r
//\r
- // Get physical address bits supported from CPU HOB.\r
+ // Get physical address bits supported.\r
//\r
- PhysicalAddressBits = 36;\r
- \r
Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
if (Hob != NULL) {\r
PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
+ } else {\r
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000008) {\r
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
+ PhysicalAddressBits = (UINT8) RegEax;\r
+ } else {\r
+ PhysicalAddressBits = 36;\r
+ }\r
}\r
\r
//\r
//\r
if (PhysicalAddressBits <= 39 ) {\r
NumberOfPml4EntriesNeeded = 1;\r
- NumberOfPdpEntriesNeeded = 1 << (PhysicalAddressBits - 30);\r
+ NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));\r
} else {\r
- NumberOfPml4EntriesNeeded = 1 << (PhysicalAddressBits - 39);\r
+ NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));\r
NumberOfPdpEntriesNeeded = 512;\r
}\r
\r
//\r
- // Pre-allocate big pages to avoid later allocations. \r
+ // Pre-allocate big pages to avoid later allocations.\r
//\r
- TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;\r
- BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);\r
+ if (!Page1GSupport) {\r
+ TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;\r
+ } else {\r
+ TotalPagesNum = NumberOfPml4EntriesNeeded + 1;\r
+ }\r
+ BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);\r
ASSERT (BigPageAddress != 0);\r
\r
//\r
// By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
//\r
PageMap = (VOID *) BigPageAddress;\r
- BigPageAddress += EFI_PAGE_SIZE;\r
+ BigPageAddress += SIZE_4KB;\r
\r
PageMapLevel4Entry = PageMap;\r
PageAddress = 0;\r
// So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.\r
//\r
PageDirectoryPointerEntry = (VOID *) BigPageAddress;\r
- BigPageAddress += EFI_PAGE_SIZE;\r
+ BigPageAddress += SIZE_4KB;\r
\r
//\r
// Make a PML4 Entry\r
//\r
- PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;\r
+ PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;\r
PageMapLevel4Entry->Bits.ReadWrite = 1;\r
PageMapLevel4Entry->Bits.Present = 1;\r
\r
- for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
- //\r
- // Each Directory Pointer entries points to a page of Page Directory entires.\r
- // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
- // \r
- PageDirectoryEntry = (VOID *) BigPageAddress;\r
- BigPageAddress += EFI_PAGE_SIZE;\r
+ if (Page1GSupport) {\r
+ PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;\r
\r
- //\r
- // Fill in a Page Directory Pointer Entries\r
- //\r
- PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;\r
- PageDirectoryPointerEntry->Bits.ReadWrite = 1;\r
- PageDirectoryPointerEntry->Bits.Present = 1;\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {\r
+ Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
+ PageDirectory1GEntry->Bits.ReadWrite = 1;\r
+ PageDirectory1GEntry->Bits.Present = 1;\r
+ PageDirectory1GEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ } else {\r
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
+ //\r
+ // Each Directory Pointer entries points to a page of Page Directory entires.\r
+ // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
+ //\r
+ PageDirectoryEntry = (VOID *) BigPageAddress;\r
+ BigPageAddress += SIZE_4KB;\r
\r
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += 0x200000) {\r
//\r
- // Fill in the Page Directory entries\r
+ // Fill in a Page Directory Pointer Entries\r
//\r
- PageDirectoryEntry->Uint64 = (UINT64)PageAddress;\r
- PageDirectoryEntry->Bits.ReadWrite = 1;\r
- PageDirectoryEntry->Bits.Present = 1;\r
- PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;\r
+ PageDirectoryPointerEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryPointerEntry->Bits.Present = 1;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
+ if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {\r
+ //\r
+ // Need to split this 2M page that covers NULL or stack range.\r
+ //\r
+ Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ }\r
\r
+ for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
+ ZeroMem (\r
+ PageDirectoryPointerEntry,\r
+ sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)\r
+ );\r
}\r
}\r
}\r
\r
//\r
// For the PML4 entries we are not using fill in a null entry.\r
- // For now we just copy the first entry.\r
//\r
for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
- CopyMem (\r
- PageMapLevel4Entry,\r
- PageMap,\r
- sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)\r
- );\r
+ ZeroMem (\r
+ PageMapLevel4Entry,\r
+ sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)\r
+ );\r
+ }\r
+\r
+ //\r
+ // Protect the page table by marking the memory used for page table to be\r
+ // read-only.\r
+ //\r
+ EnablePageTableProtection ((UINTN)PageMap, TRUE);\r
+\r
+ //\r
+ // Set IA32_EFER.NXE if necessary.\r
+ //\r
+ if (IsEnableNonExecNeeded ()) {\r
+ EnableExecuteDisableBit ();\r
}\r
\r
return (UINTN)PageMap;\r