/** @file\r
Page Fault (#PF) handler for X64 processors\r
\r
-Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>\r
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
-\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
#include "PiSmmCpuDxeSmm.h"\r
\r
-#define PAGE_TABLE_PAGES 8\r
-#define ACC_MAX_BIT BIT3\r
+#define PAGE_TABLE_PAGES 8\r
+#define ACC_MAX_BIT BIT3\r
+\r
+extern UINTN mSmmShadowStackSize;\r
\r
-LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
-BOOLEAN m1GPageTableSupport = FALSE;\r
-BOOLEAN mCpuSmmStaticPageTable;\r
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
+BOOLEAN m1GPageTableSupport = FALSE;\r
+BOOLEAN mCpuSmmRestrictedMemoryAccess;\r
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;\r
\r
/**\r
Check if 1-GByte pages is supported by processor or not.\r
VOID\r
)\r
{\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
\r
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
if (RegEax >= 0x80000001) {\r
return TRUE;\r
}\r
}\r
+\r
return FALSE;\r
}\r
\r
+/**\r
+ The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and\r
+ the max physical address bits is bigger than 48. Because 4-level paging can support\r
+ to address physical address up to 2^48 - 1, there is no need to enable 5-level paging\r
+ with max physical address bits <= 48.\r
+\r
+ @retval TRUE 5-level paging enabling is needed.\r
+ @retval FALSE 5-level paging enabling is not needed.\r
+**/\r
+BOOLEAN\r
+Is5LevelPagingNeeded (\r
+ VOID\r
+ )\r
+{\r
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;\r
+ UINT32 MaxExtendedFunctionId;\r
+\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);\r
+ if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);\r
+ } else {\r
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;\r
+ }\r
+\r
+ AsmCpuidEx (\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,\r
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,\r
+ NULL,\r
+ NULL,\r
+ &ExtFeatureEcx.Uint32,\r
+ NULL\r
+ );\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "PhysicalAddressBits = %d, 5LPageTable = %d.\n",\r
+ VirPhyAddressSize.Bits.PhysicalAddressBits,\r
+ ExtFeatureEcx.Bits.FiveLevelPage\r
+ ));\r
+\r
+ if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&\r
+ (ExtFeatureEcx.Bits.FiveLevelPage == 1))\r
+ {\r
+ return TRUE;\r
+ } else {\r
+ return FALSE;\r
+ }\r
+}\r
+\r
/**\r
Set sub-entries number in entry.\r
\r
**/\r
VOID\r
SetSubEntriesNum (\r
- IN OUT UINT64 *Entry,\r
- IN UINT64 SubEntryNum\r
+ IN OUT UINT64 *Entry,\r
+ IN UINT64 SubEntryNum\r
)\r
{\r
//\r
**/\r
UINT64\r
GetSubEntriesNum (\r
- IN UINT64 *Entry\r
+ IN UINT64 *Entry\r
)\r
{\r
//\r
VOID\r
)\r
{\r
- UINT32 RegEax;\r
- UINT8 PhysicalAddressBits;\r
- VOID *Hob;\r
+ UINT32 RegEax;\r
+ UINT8 PhysicalAddressBits;\r
+ VOID *Hob;\r
\r
//\r
// Get physical address bits supported.\r
//\r
Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
if (Hob != NULL) {\r
- PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
+ PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;\r
} else {\r
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
if (RegEax >= 0x80000008) {\r
AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
- PhysicalAddressBits = (UINT8) RegEax;\r
+ PhysicalAddressBits = (UINT8)RegEax;\r
} else {\r
PhysicalAddressBits = 36;\r
}\r
}\r
\r
- //\r
- // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.\r
- //\r
- ASSERT (PhysicalAddressBits <= 52);\r
- if (PhysicalAddressBits > 48) {\r
- PhysicalAddressBits = 48;\r
- }\r
return PhysicalAddressBits;\r
}\r
\r
/**\r
Set static page table.\r
\r
- @param[in] PageTable Address of page table.\r
+ @param[in] PageTable Address of page table.\r
+ @param[in] PhysicalAddressBits The maximum physical address bits supported.\r
**/\r
VOID\r
SetStaticPageTable (\r
- IN UINTN PageTable\r
+ IN UINTN PageTable,\r
+ IN UINT8 PhysicalAddressBits\r
)\r
{\r
- UINT64 PageAddress;\r
- UINTN NumberOfPml4EntriesNeeded;\r
- UINTN NumberOfPdpEntriesNeeded;\r
- UINTN IndexOfPml4Entries;\r
- UINTN IndexOfPdpEntries;\r
- UINTN IndexOfPageDirectoryEntries;\r
- UINT64 *PageMapLevel4Entry;\r
- UINT64 *PageMap;\r
- UINT64 *PageDirectoryPointerEntry;\r
- UINT64 *PageDirectory1GEntry;\r
- UINT64 *PageDirectoryEntry;\r
-\r
- if (mPhysicalAddressBits <= 39 ) {\r
- NumberOfPml4EntriesNeeded = 1;\r
- NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));\r
- } else {\r
- NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));\r
- NumberOfPdpEntriesNeeded = 512;\r
+ UINT64 PageAddress;\r
+ UINTN NumberOfPml5EntriesNeeded;\r
+ UINTN NumberOfPml4EntriesNeeded;\r
+ UINTN NumberOfPdpEntriesNeeded;\r
+ UINTN IndexOfPml5Entries;\r
+ UINTN IndexOfPml4Entries;\r
+ UINTN IndexOfPdpEntries;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ UINT64 *PageMapLevel5Entry;\r
+ UINT64 *PageMapLevel4Entry;\r
+ UINT64 *PageMap;\r
+ UINT64 *PageDirectoryPointerEntry;\r
+ UINT64 *PageDirectory1GEntry;\r
+ UINT64 *PageDirectoryEntry;\r
+\r
+ //\r
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses\r
+ // when 5-Level Paging is disabled.\r
+ //\r
+ ASSERT (PhysicalAddressBits <= 52);\r
+ if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {\r
+ PhysicalAddressBits = 48;\r
}\r
\r
+ NumberOfPml5EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 48) {\r
+ NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);\r
+ PhysicalAddressBits = 48;\r
+ }\r
+\r
+ NumberOfPml4EntriesNeeded = 1;\r
+ if (PhysicalAddressBits > 39) {\r
+ NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);\r
+ PhysicalAddressBits = 39;\r
+ }\r
+\r
+ NumberOfPdpEntriesNeeded = 1;\r
+ ASSERT (PhysicalAddressBits > 30);\r
+ NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);\r
+\r
//\r
// By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
//\r
- PageMap = (VOID *) PageTable;\r
+ PageMap = (VOID *)PageTable;\r
\r
PageMapLevel4Entry = PageMap;\r
- PageAddress = 0;\r
- for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
+ PageMapLevel5Entry = NULL;\r
+ if (m5LevelPagingNeeded) {\r
//\r
- // Each PML4 entry points to a page of Page Directory Pointer entries.\r
+    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
//\r
- PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);\r
- if (PageDirectoryPointerEntry == NULL) {\r
- PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
- ASSERT(PageDirectoryPointerEntry != NULL);\r
- ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));\r
+ PageMapLevel5Entry = PageMap;\r
+ }\r
+\r
+ PageAddress = 0;\r
\r
- *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ for ( IndexOfPml5Entries = 0\r
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded\r
+ ; IndexOfPml5Entries++, PageMapLevel5Entry++)\r
+ {\r
+ //\r
+    // Each PML5 entry points to a page of PML4 entries.
+    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
+ // When 5-Level Paging is disabled, below allocation happens only once.\r
+ //\r
+ if (m5LevelPagingNeeded) {\r
+ PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);\r
+ if (PageMapLevel4Entry == NULL) {\r
+ PageMapLevel4Entry = AllocatePageTableMemory (1);\r
+ ASSERT (PageMapLevel4Entry != NULL);\r
+ ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));\r
+\r
+ *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ }\r
}\r
\r
- if (m1GPageTableSupport) {\r
- PageDirectory1GEntry = PageDirectoryPointerEntry;\r
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
- if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {\r
- //\r
- // Skip the < 4G entries\r
- //\r
- continue;\r
- }\r
- //\r
- // Fill in the Page Directory entries\r
- //\r
- *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
+ for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
+ //\r
+ // Each PML4 entry points to a page of Page Directory Pointer entries.\r
+ //\r
+ PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);\r
+ if (PageDirectoryPointerEntry == NULL) {\r
+ PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
+ ASSERT (PageDirectoryPointerEntry != NULL);\r
+ ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));\r
+\r
+ *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
- } else {\r
- PageAddress = BASE_4GB;\r
- for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
- if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {\r
- //\r
- // Skip the < 4G entries\r
- //\r
- continue;\r
- }\r
- //\r
- // Each Directory Pointer entries points to a page of Page Directory entires.\r
- // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
- //\r
- PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);\r
- if (PageDirectoryEntry == NULL) {\r
- PageDirectoryEntry = AllocatePageTableMemory (1);\r
- ASSERT(PageDirectoryEntry != NULL);\r
- ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));\r
+\r
+ if (m1GPageTableSupport) {\r
+ PageDirectory1GEntry = PageDirectoryPointerEntry;\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
+ if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {\r
+ //\r
+ // Skip the < 4G entries\r
+ //\r
+ continue;\r
+ }\r
\r
//\r
- // Fill in a Page Directory Pointer Entries\r
+ // Fill in the Page Directory entries\r
//\r
- *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
+ } else {\r
+ PageAddress = BASE_4GB;\r
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
+ if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {\r
+ //\r
+ // Skip the < 4G entries\r
+ //\r
+ continue;\r
+ }\r
\r
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
//\r
- // Fill in the Page Directory entries\r
+        // Each Directory Pointer entry points to a page of Page Directory entries.
+        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
//\r
- *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
+ PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);\r
+ if (PageDirectoryEntry == NULL) {\r
+ PageDirectoryEntry = AllocatePageTableMemory (1);\r
+ ASSERT (PageDirectoryEntry != NULL);\r
+ ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));\r
+\r
+ //\r
+ // Fill in a Page Directory Pointer Entries\r
+ //\r
+ *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ }\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
+ }\r
}\r
}\r
}\r
VOID\r
)\r
{\r
- EFI_PHYSICAL_ADDRESS Pages;\r
- UINT64 *PTEntry;\r
- LIST_ENTRY *FreePage;\r
- UINTN Index;\r
- UINTN PageFaultHandlerHookAddress;\r
- IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
- EFI_STATUS Status;\r
+ EFI_PHYSICAL_ADDRESS Pages;\r
+ UINT64 *PTEntry;\r
+ LIST_ENTRY *FreePage;\r
+ UINTN Index;\r
+ UINTN PageFaultHandlerHookAddress;\r
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
+ EFI_STATUS Status;\r
+ UINT64 *Pml4Entry;\r
+ UINT64 *Pml5Entry;\r
\r
//\r
// Initialize spin lock\r
//\r
InitializeSpinLock (mPFLock);\r
\r
- mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);\r
- m1GPageTableSupport = Is1GPageSupport ();\r
- DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));\r
- DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));\r
-\r
- mPhysicalAddressBits = CalculateMaximumSupportAddress ();\r
- DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));\r
+ mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);\r
+ m1GPageTableSupport = Is1GPageSupport ();\r
+ m5LevelPagingNeeded = Is5LevelPagingNeeded ();\r
+ mPhysicalAddressBits = CalculateMaximumSupportAddress ();\r
+ PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);\r
+ DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));\r
+ DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));\r
+ DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));\r
//\r
// Generate PAE page table for the first 4GB memory space\r
//\r
//\r
// Set IA32_PG_PMNT bit to mask this entry\r
//\r
- PTEntry = (UINT64*)(UINTN)Pages;\r
+ PTEntry = (UINT64 *)(UINTN)Pages;\r
for (Index = 0; Index < 4; Index++) {\r
PTEntry[Index] |= IA32_PG_PMNT;\r
}\r
//\r
// Fill Page-Table-Level4 (PML4) entry\r
//\r
- PTEntry = (UINT64*)AllocatePageTableMemory (1);\r
- ASSERT (PTEntry != NULL);\r
- *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
- ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));\r
+ Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);\r
+ ASSERT (Pml4Entry != NULL);\r
+ *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));\r
\r
//\r
// Set sub-entries number\r
//\r
- SetSubEntriesNum (PTEntry, 3);\r
+ SetSubEntriesNum (Pml4Entry, 3);\r
+ PTEntry = Pml4Entry;\r
+\r
+ if (m5LevelPagingNeeded) {\r
+ //\r
+ // Fill PML5 entry\r
+ //\r
+ Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);\r
+ ASSERT (Pml5Entry != NULL);\r
+ *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));\r
+ //\r
+ // Set sub-entries number\r
+ //\r
+ SetSubEntriesNum (Pml5Entry, 1);\r
+ PTEntry = Pml5Entry;\r
+ }\r
\r
- if (mCpuSmmStaticPageTable) {\r
- SetStaticPageTable ((UINTN)PTEntry);\r
+ if (mCpuSmmRestrictedMemoryAccess) {\r
+ //\r
+ // When access to non-SMRAM memory is restricted, create page table\r
+ // that covers all memory space.\r
+ //\r
+ SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);\r
} else {\r
//\r
// Add pages to page pool\r
//\r
- FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);\r
+ FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);\r
ASSERT (FreePage != NULL);\r
for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {\r
InsertTailList (&mPagePool, FreePage);\r
}\r
}\r
\r
- if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||\r
+ HEAP_GUARD_NONSTOP_MODE ||\r
+ NULL_DETECTION_NONSTOP_MODE)\r
+ {\r
//\r
// Set own Page Fault entry instead of the default one, because SMM Profile\r
// feature depends on IRET instruction to do Single Step\r
//\r
PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;\r
- IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;\r
- IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
- IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
- IdtEntry->Bits.Reserved_0 = 0;\r
- IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
- IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
- IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);\r
- IdtEntry->Bits.Reserved_1 = 0;\r
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;\r
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
+ IdtEntry->Bits.Reserved_0 = 0;\r
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
+ IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);\r
+ IdtEntry->Bits.Reserved_1 = 0;\r
} else {\r
//\r
// Register Smm Page Fault Handler\r
// Additional SMM IDT initialization for SMM stack guard\r
//\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- InitializeIDTSmmStackGuard ();\r
+ DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));\r
+ InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);\r
+ }\r
+\r
+ //\r
+ // Additional SMM IDT initialization for SMM CET shadow stack\r
+ //\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));\r
+ InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);\r
+ InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);\r
}\r
\r
//\r
- // Return the address of PML4 (to set CR3)\r
+ // Return the address of PML4/PML5 (to set CR3)\r
//\r
return (UINT32)(UINTN)PTEntry;\r
}\r
**/\r
VOID\r
SetAccNum (\r
- IN OUT UINT64 *Entry,\r
- IN UINT64 Acc\r
+ IN OUT UINT64 *Entry,\r
+ IN UINT64 Acc\r
)\r
{\r
//\r
**/\r
UINT64\r
GetAccNum (\r
- IN UINT64 *Entry\r
+ IN UINT64 *Entry\r
)\r
{\r
//\r
**/\r
UINT64\r
GetAndUpdateAccNum (\r
- IN OUT UINT64 *Entry\r
+ IN OUT UINT64 *Entry\r
)\r
{\r
- UINT64 Acc;\r
+ UINT64 Acc;\r
\r
Acc = GetAccNum (Entry);\r
if ((*Entry & IA32_PG_A) != 0) {\r
SetAccNum (Entry, Acc - 1);\r
}\r
}\r
+\r
return Acc;\r
}\r
\r
VOID\r
)\r
{\r
- UINT64 *Pml4;\r
- UINT64 *Pdpt;\r
- UINT64 *Pdt;\r
- UINTN Pml4Index;\r
- UINTN PdptIndex;\r
- UINTN PdtIndex;\r
- UINTN MinPml4;\r
- UINTN MinPdpt;\r
- UINTN MinPdt;\r
- UINT64 MinAcc;\r
- UINT64 Acc;\r
- UINT64 SubEntriesNum;\r
- BOOLEAN PML4EIgnore;\r
- BOOLEAN PDPTEIgnore;\r
- UINT64 *ReleasePageAddress;\r
-\r
- Pml4 = NULL;\r
- Pdpt = NULL;\r
- Pdt = NULL;\r
- MinAcc = (UINT64)-1;\r
- MinPml4 = (UINTN)-1;\r
- MinPdpt = (UINTN)-1;\r
- MinPdt = (UINTN)-1;\r
- Acc = 0;\r
+ UINT64 Pml5Entry;\r
+ UINT64 *Pml5;\r
+ UINT64 *Pml4;\r
+ UINT64 *Pdpt;\r
+ UINT64 *Pdt;\r
+ UINTN Pml5Index;\r
+ UINTN Pml4Index;\r
+ UINTN PdptIndex;\r
+ UINTN PdtIndex;\r
+ UINTN MinPml5;\r
+ UINTN MinPml4;\r
+ UINTN MinPdpt;\r
+ UINTN MinPdt;\r
+ UINT64 MinAcc;\r
+ UINT64 Acc;\r
+ UINT64 SubEntriesNum;\r
+ BOOLEAN PML4EIgnore;\r
+ BOOLEAN PDPTEIgnore;\r
+ UINT64 *ReleasePageAddress;\r
+ IA32_CR4 Cr4;\r
+ BOOLEAN Enable5LevelPaging;\r
+ UINT64 PFAddress;\r
+ UINT64 PFAddressPml5Index;\r
+ UINT64 PFAddressPml4Index;\r
+ UINT64 PFAddressPdptIndex;\r
+ UINT64 PFAddressPdtIndex;\r
+\r
+ Pml4 = NULL;\r
+ Pdpt = NULL;\r
+ Pdt = NULL;\r
+ MinAcc = (UINT64)-1;\r
+ MinPml4 = (UINTN)-1;\r
+ MinPml5 = (UINTN)-1;\r
+ MinPdpt = (UINTN)-1;\r
+ MinPdt = (UINTN)-1;\r
+ Acc = 0;\r
ReleasePageAddress = 0;\r
+ PFAddress = AsmReadCr2 ();\r
+ PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);\r
+ PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);\r
+ PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);\r
+ PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);\r
+\r
+ Cr4.UintN = AsmReadCr4 ();\r
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);\r
+ Pml5 = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);\r
+\r
+ if (!Enable5LevelPaging) {\r
+ //\r
+ // Create one fake PML5 entry for 4-Level Paging\r
+ // so that the page table parsing logic only handles 5-Level page structure.\r
+ //\r
+ Pml5Entry = (UINTN)Pml5 | IA32_PG_P;\r
+ Pml5 = &Pml5Entry;\r
+ }\r
\r
//\r
// First, find the leaf entry has the smallest access record value\r
//\r
- Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);\r
- for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {\r
- if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {\r
+ for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {\r
+ if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {\r
//\r
- // If the PML4 entry is not present or is masked, skip it\r
+ // If the PML5 entry is not present or is masked, skip it\r
//\r
continue;\r
}\r
- Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);\r
- PML4EIgnore = FALSE;\r
- for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {\r
- if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
+\r
+ Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);\r
+ for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {\r
+ if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {\r
//\r
- // If the PDPT entry is not present or is masked, skip it\r
+ // If the PML4 entry is not present or is masked, skip it\r
//\r
- if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
- //\r
- // If the PDPT entry is masked, we will ignore checking the PML4 entry\r
- //\r
- PML4EIgnore = TRUE;\r
- }\r
continue;\r
}\r
- if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {\r
- //\r
- // It's not 1-GByte pages entry, it should be a PDPT entry,\r
- // we will not check PML4 entry more\r
- //\r
- PML4EIgnore = TRUE;\r
- Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);\r
- PDPTEIgnore = FALSE;\r
- for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {\r
- if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
+\r
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);\r
+ PML4EIgnore = FALSE;\r
+ for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {\r
+ if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {\r
+ //\r
+ // If the PDPT entry is not present or is masked, skip it\r
+ //\r
+ if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
//\r
- // If the PD entry is not present or is masked, skip it\r
+ // If the PDPT entry is masked, we will ignore checking the PML4 entry\r
//\r
- if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
+ PML4EIgnore = TRUE;\r
+ }\r
+\r
+ continue;\r
+ }\r
+\r
+ if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {\r
+ //\r
+ // It's not 1-GByte pages entry, it should be a PDPT entry,\r
+ // we will not check PML4 entry more\r
+ //\r
+ PML4EIgnore = TRUE;\r
+ Pdt = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);\r
+ PDPTEIgnore = FALSE;\r
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {\r
+ if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {\r
+ //\r
+ // If the PD entry is not present or is masked, skip it\r
//\r
- // If the PD entry is masked, we will not PDPT entry more\r
+ if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
+ //\r
+                // If the PD entry is masked, we will not check the PDPT entry any more
+ //\r
+ PDPTEIgnore = TRUE;\r
+ }\r
+\r
+ continue;\r
+ }\r
+\r
+ if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {\r
+ //\r
+ // It's not 2 MByte page table entry, it should be PD entry\r
+ // we will find the entry has the smallest access record value\r
//\r
PDPTEIgnore = TRUE;\r
+ if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||\r
+ (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))\r
+ {\r
+ Acc = GetAndUpdateAccNum (Pdt + PdtIndex);\r
+ if (Acc < MinAcc) {\r
+ //\r
+ // If the PD entry has the smallest access record value,\r
+ // save the Page address to be released\r
+ //\r
+ MinAcc = Acc;\r
+ MinPml5 = Pml5Index;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = PdptIndex;\r
+ MinPdt = PdtIndex;\r
+ ReleasePageAddress = Pdt + PdtIndex;\r
+ }\r
+ }\r
}\r
- continue;\r
}\r
- if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {\r
+\r
+ if (!PDPTEIgnore) {\r
//\r
- // It's not 2 MByte page table entry, it should be PD entry\r
- // we will find the entry has the smallest access record value\r
+          // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
+          // it should only have entries pointing to 2 MByte Pages
//\r
- PDPTEIgnore = TRUE;\r
- Acc = GetAndUpdateAccNum (Pdt + PdtIndex);\r
- if (Acc < MinAcc) {\r
- //\r
- // If the PD entry has the smallest access record value,\r
- // save the Page address to be released\r
- //\r
- MinAcc = Acc;\r
- MinPml4 = Pml4Index;\r
- MinPdpt = PdptIndex;\r
- MinPdt = PdtIndex;\r
- ReleasePageAddress = Pdt + PdtIndex;\r
+ if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||\r
+ (Pml5Index != PFAddressPml5Index))\r
+ {\r
+ Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);\r
+ if (Acc < MinAcc) {\r
+ //\r
+ // If the PDPT entry has the smallest access record value,\r
+ // save the Page address to be released\r
+ //\r
+ MinAcc = Acc;\r
+ MinPml5 = Pml5Index;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = PdptIndex;\r
+ MinPdt = (UINTN)-1;\r
+ ReleasePageAddress = Pdpt + PdptIndex;\r
+ }\r
}\r
}\r
}\r
- if (!PDPTEIgnore) {\r
- //\r
- // If this PDPT entry has no PDT entries pointer to 4 KByte pages,\r
- // it should only has the entries point to 2 MByte Pages\r
- //\r
- Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);\r
+ }\r
+\r
+ if (!PML4EIgnore) {\r
+ //\r
+      // If the PML4 entry has no PDPT entry pointing to 2 MByte pages,
+      // it should only have entries pointing to 1 GByte Pages
+ //\r
+ if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {\r
+ Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);\r
if (Acc < MinAcc) {\r
//\r
- // If the PDPT entry has the smallest access record value,\r
+ // If the PML4 entry has the smallest access record value,\r
// save the Page address to be released\r
//\r
- MinAcc = Acc;\r
- MinPml4 = Pml4Index;\r
- MinPdpt = PdptIndex;\r
- MinPdt = (UINTN)-1;\r
- ReleasePageAddress = Pdpt + PdptIndex;\r
+ MinAcc = Acc;\r
+ MinPml5 = Pml5Index;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = (UINTN)-1;\r
+ MinPdt = (UINTN)-1;\r
+ ReleasePageAddress = Pml4 + Pml4Index;\r
}\r
}\r
}\r
}\r
- if (!PML4EIgnore) {\r
- //\r
- // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,\r
- // it should only has the entries point to 1 GByte Pages\r
- //\r
- Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);\r
- if (Acc < MinAcc) {\r
- //\r
- // If the PML4 entry has the smallest access record value,\r
- // save the Page address to be released\r
- //\r
- MinAcc = Acc;\r
- MinPml4 = Pml4Index;\r
- MinPdpt = (UINTN)-1;\r
- MinPdt = (UINTN)-1;\r
- ReleasePageAddress = Pml4 + Pml4Index;\r
- }\r
- }\r
}\r
+\r
//\r
// Make sure one PML4/PDPT/PD entry is selected\r
//\r
//\r
// Secondly, insert the page pointed by this entry into page pool and clear this entry\r
//\r
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));\r
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));\r
*ReleasePageAddress = 0;\r
\r
//\r
//\r
// If 4 KByte Page Table is released, check the PDPT entry\r
//\r
- Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);\r
- SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);\r
- if (SubEntriesNum == 0) {\r
+ Pml4 = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);\r
+ Pdpt = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);\r
+ SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);\r
+ if ((SubEntriesNum == 0) &&\r
+ ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))\r
+ {\r
//\r
// Release the empty Page Directory table if there was no more 4 KByte Page Table entry\r
// clear the Page directory entry\r
//\r
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));\r
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));\r
Pdpt[MinPdpt] = 0;\r
//\r
// Go on checking the PML4 table\r
MinPdt = (UINTN)-1;\r
continue;\r
}\r
+\r
//\r
// Update the sub-entries filed in PDPT entry and exit\r
//\r
- SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);\r
+ SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);\r
break;\r
}\r
+\r
if (MinPdpt != (UINTN)-1) {\r
//\r
// One 2MB Page Table is released or Page Directory table is released, check the PML4 entry\r
//\r
SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);\r
- if (SubEntriesNum == 0) {\r
+ if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {\r
//\r
// Release the empty PML4 table if there was no more 1G KByte Page Table entry\r
// clear the Page directory entry\r
//\r
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));\r
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));\r
Pml4[MinPml4] = 0;\r
- MinPdpt = (UINTN)-1;\r
+ MinPdpt = (UINTN)-1;\r
continue;\r
}\r
+\r
//\r
// Update the sub-entries filed in PML4 entry and exit\r
//\r
- SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);\r
+ SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);\r
break;\r
}\r
+\r
//\r
// PLM4 table has been released before, exit it\r
//\r
VOID\r
)\r
{\r
- UINT64 RetVal;\r
+ UINT64 RetVal;\r
\r
if (IsListEmpty (&mPagePool)) {\r
//\r
//\r
// Clean this page and return\r
//\r
- ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
+ ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);\r
return RetVal;\r
}\r
\r
VOID\r
)\r
{\r
- UINT64 *PageTable;\r
- UINT64 *Pml4;\r
- UINT64 PFAddress;\r
- UINTN StartBit;\r
- UINTN EndBit;\r
- UINT64 PTIndex;\r
- UINTN Index;\r
- SMM_PAGE_SIZE_TYPE PageSize;\r
- UINTN NumOfPages;\r
- UINTN PageAttribute;\r
- EFI_STATUS Status;\r
- UINT64 *UpperEntry;\r
+ UINT64 *PageTable;\r
+ UINT64 *PageTableTop;\r
+ UINT64 PFAddress;\r
+ UINTN StartBit;\r
+ UINTN EndBit;\r
+ UINT64 PTIndex;\r
+ UINTN Index;\r
+ SMM_PAGE_SIZE_TYPE PageSize;\r
+ UINTN NumOfPages;\r
+ UINTN PageAttribute;\r
+ EFI_STATUS Status;\r
+ UINT64 *UpperEntry;\r
+ BOOLEAN Enable5LevelPaging;\r
+ IA32_CR4 Cr4;\r
\r
//\r
// Set default SMM page attribute\r
//\r
- PageSize = SmmPageSize2M;\r
- NumOfPages = 1;\r
+ PageSize = SmmPageSize2M;\r
+ NumOfPages = 1;\r
PageAttribute = 0;\r
\r
- EndBit = 0;\r
- Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);\r
- PFAddress = AsmReadCr2 ();\r
+ EndBit = 0;\r
+ PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);\r
+ PFAddress = AsmReadCr2 ();\r
+\r
+ Cr4.UintN = AsmReadCr4 ();\r
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);\r
\r
Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);\r
//\r
// If platform not support page table attribute, set default SMM page attribute\r
//\r
if (Status != EFI_SUCCESS) {\r
- PageSize = SmmPageSize2M;\r
- NumOfPages = 1;\r
+ PageSize = SmmPageSize2M;\r
+ NumOfPages = 1;\r
PageAttribute = 0;\r
}\r
+\r
if (PageSize >= MaxSmmPageSizeType) {\r
PageSize = SmmPageSize2M;\r
}\r
+\r
if (NumOfPages > 512) {\r
NumOfPages = 512;\r
}\r
\r
switch (PageSize) {\r
- case SmmPageSize4K:\r
- //\r
- // BIT12 to BIT20 is Page Table index\r
- //\r
- EndBit = 12;\r
- break;\r
- case SmmPageSize2M:\r
- //\r
- // BIT21 to BIT29 is Page Directory index\r
- //\r
- EndBit = 21;\r
- PageAttribute |= (UINTN)IA32_PG_PS;\r
- break;\r
- case SmmPageSize1G:\r
- if (!m1GPageTableSupport) {\r
- DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));\r
+ case SmmPageSize4K:\r
+ //\r
+ // BIT12 to BIT20 is Page Table index\r
+ //\r
+ EndBit = 12;\r
+ break;\r
+ case SmmPageSize2M:\r
+ //\r
+ // BIT21 to BIT29 is Page Directory index\r
+ //\r
+ EndBit = 21;\r
+ PageAttribute |= (UINTN)IA32_PG_PS;\r
+ break;\r
+ case SmmPageSize1G:\r
+ if (!m1GPageTableSupport) {\r
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));\r
+ ASSERT (FALSE);\r
+ }\r
+\r
+ //\r
+ // BIT30 to BIT38 is Page Directory Pointer Table index\r
+ //\r
+ EndBit = 30;\r
+ PageAttribute |= (UINTN)IA32_PG_PS;\r
+ break;\r
+ default:\r
ASSERT (FALSE);\r
- }\r
- //\r
- // BIT30 to BIT38 is Page Directory Pointer Table index\r
- //\r
- EndBit = 30;\r
- PageAttribute |= (UINTN)IA32_PG_PS;\r
- break;\r
- default:\r
- ASSERT (FALSE);\r
}\r
\r
//\r
}\r
\r
for (Index = 0; Index < NumOfPages; Index++) {\r
- PageTable = Pml4;\r
+ PageTable = PageTableTop;\r
UpperEntry = NULL;\r
- for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {\r
+ for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {\r
PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
if ((PageTable[PTIndex] & IA32_PG_P) == 0) {\r
//\r
//\r
UpperEntry = PageTable + PTIndex;\r
}\r
+\r
//\r
// BIT9 to BIT11 of entry is used to save access record,\r
// initialize value is 7\r
//\r
PageTable[PTIndex] |= (UINT64)IA32_PG_A;\r
SetAccNum (PageTable + PTIndex, 7);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);\r
}\r
\r
PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));\r
ASSERT (FALSE);\r
}\r
+\r
//\r
// Fill the new entry\r
//\r
PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |\r
PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;\r
if (UpperEntry != NULL) {\r
- SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);\r
+ SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);\r
}\r
+\r
//\r
// Get the next page address if we need to create more page tables\r
//\r
VOID\r
EFIAPI\r
SmiPFHandler (\r
- IN EFI_EXCEPTION_TYPE InterruptType,\r
- IN EFI_SYSTEM_CONTEXT SystemContext\r
+ IN EFI_EXCEPTION_TYPE InterruptType,\r
+ IN EFI_SYSTEM_CONTEXT SystemContext\r
)\r
{\r
- UINTN PFAddress;\r
- UINTN GuardPageAddress;\r
- UINTN CpuIndex;\r
+ UINTN PFAddress;\r
+ UINTN GuardPageAddress;\r
+ UINTN ShadowStackGuardPageAddress;\r
+ UINTN CpuIndex;\r
\r
ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);\r
\r
\r
PFAddress = AsmReadCr2 ();\r
\r
- if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {\r
+ if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {\r
DumpCpuContext (InterruptType, SystemContext);\r
DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));\r
CpuDeadLoop ();\r
+ goto Exit;\r
}\r
\r
//\r
- // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,\r
+ // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,\r
// or SMM page protection violation.\r
//\r
if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&\r
- (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {\r
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))\r
+ {\r
DumpCpuContext (InterruptType, SystemContext);\r
- CpuIndex = GetCpuIndex ();\r
- GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);\r
+ CpuIndex = GetCpuIndex ();\r
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));\r
+ ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));\r
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&\r
(PFAddress >= GuardPageAddress) &&\r
- (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {\r
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))\r
+ {\r
DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));\r
+ } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&\r
+ (mSmmShadowStackSize > 0) &&\r
+ (PFAddress >= ShadowStackGuardPageAddress) &&\r
+ (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))\r
+ {\r
+ DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));\r
} else {\r
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {\r
DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));\r
DEBUG_CODE (\r
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);\r
- );\r
+ );\r
} else {\r
DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));\r
DEBUG_CODE (\r
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
- );\r
+ );\r
+ }\r
+\r
+ if (HEAP_GUARD_NONSTOP_MODE) {\r
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);\r
+ goto Exit;\r
}\r
}\r
+\r
CpuDeadLoop ();\r
+ goto Exit;\r
}\r
\r
//\r
- // If a page fault occurs in SMM range\r
+ // If a page fault occurs in non-SMRAM range.\r
//\r
if ((PFAddress < mCpuHotPlugData.SmrrBase) ||\r
- (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
- DumpCpuContext (InterruptType, SystemContext);\r
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))\r
+ {\r
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {\r
+ DumpCpuContext (InterruptType, SystemContext);\r
DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));\r
DEBUG_CODE (\r
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);\r
- );\r
+ );\r
CpuDeadLoop ();\r
+ goto Exit;\r
}\r
- if (IsSmmCommBufferForbiddenAddress (PFAddress)) {\r
- DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));\r
+\r
+ //\r
+ // If NULL pointer was just accessed\r
+ //\r
+ if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&\r
+ (PFAddress < EFI_PAGE_SIZE))\r
+ {\r
+ DumpCpuContext (InterruptType, SystemContext);\r
+ DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));\r
DEBUG_CODE (\r
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
- );\r
+ );\r
+\r
+ if (NULL_DETECTION_NONSTOP_MODE) {\r
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);\r
+ goto Exit;\r
+ }\r
+\r
CpuDeadLoop ();\r
+ goto Exit;\r
}\r
- }\r
\r
- //\r
- // If NULL pointer was just accessed\r
- //\r
- if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&\r
- (PFAddress < EFI_PAGE_SIZE)) {\r
- DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));\r
- DEBUG_CODE (\r
- DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
- );\r
- CpuDeadLoop ();\r
+ if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {\r
+ DumpCpuContext (InterruptType, SystemContext);\r
+ DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));\r
+ DEBUG_CODE (\r
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
+ );\r
+ CpuDeadLoop ();\r
+ goto Exit;\r
+ }\r
}\r
\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
SmiDefaultPFHandler ();\r
}\r
\r
+Exit:\r
ReleaseSpinLock (mPFLock);\r
}\r
\r
/**\r
- This function sets memory attribute for page table.\r
+ This function reads CR2 register when on-demand paging is enabled.\r
+\r
+ @param[out] *Cr2 Pointer to variable to hold CR2 register value.\r
**/\r
VOID\r
-SetPageTableAttributes (\r
- VOID\r
+SaveCr2 (\r
+ OUT UINTN *Cr2\r
)\r
{\r
- UINTN Index2;\r
- UINTN Index3;\r
- UINTN Index4;\r
- UINT64 *L1PageTable;\r
- UINT64 *L2PageTable;\r
- UINT64 *L3PageTable;\r
- UINT64 *L4PageTable;\r
- BOOLEAN IsSplitted;\r
- BOOLEAN PageTableSplitted;\r
-\r
- //\r
- // Don't do this if\r
- // - no static page table; or\r
- // - SMM heap guard feature enabled\r
- // BIT2: SMM page guard enabled\r
- // BIT3: SMM pool guard enabled\r
- //\r
- if (!mCpuSmmStaticPageTable ||\r
- (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) {\r
+ if (!mCpuSmmRestrictedMemoryAccess) {\r
//\r
- // Static paging and heap guard should not be enabled at the same time.\r
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
//\r
- ASSERT (!(mCpuSmmStaticPageTable &&\r
- (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));\r
- return ;\r
+ *Cr2 = AsmReadCr2 ();\r
}\r
+}\r
\r
- DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));\r
-\r
- //\r
- // Disable write protection, because we need mark page table to be write protected.\r
- // We need *write* page table memory, to mark itself to be *read only*.\r
- //\r
- AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);\r
-\r
- do {\r
- DEBUG ((DEBUG_INFO, "Start...\n"));\r
- PageTableSplitted = FALSE;\r
-\r
- L4PageTable = (UINT64 *)GetPageTableBase ();\r
- SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
- PageTableSplitted = (PageTableSplitted || IsSplitted);\r
-\r
- for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {\r
- L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
- if (L3PageTable == NULL) {\r
- continue;\r
- }\r
-\r
- SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
- PageTableSplitted = (PageTableSplitted || IsSplitted);\r
-\r
- for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {\r
- if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {\r
- // 1G\r
- continue;\r
- }\r
- L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
- if (L2PageTable == NULL) {\r
- continue;\r
- }\r
-\r
- SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
- PageTableSplitted = (PageTableSplitted || IsSplitted);\r
+/**\r
+ This function restores CR2 register when on-demand paging is enabled.\r
\r
- for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {\r
- if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {\r
- // 2M\r
- continue;\r
- }\r
- L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
- if (L1PageTable == NULL) {\r
- continue;\r
- }\r
- SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
- PageTableSplitted = (PageTableSplitted || IsSplitted);\r
- }\r
- }\r
- }\r
- } while (PageTableSplitted);\r
+ @param[in] Cr2 Value to write into CR2 register.\r
+**/\r
+VOID\r
+RestoreCr2 (\r
+ IN UINTN Cr2\r
+ )\r
+{\r
+ if (!mCpuSmmRestrictedMemoryAccess) {\r
+ //\r
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
+ //\r
+ AsmWriteCr2 (Cr2);\r
+ }\r
+}\r
\r
- //\r
- // Enable write protection, after page table updated.\r
- //\r
- AsmWriteCr0 (AsmReadCr0() | CR0_WP);\r
+/**\r
+ Return whether access to non-SMRAM is restricted.\r
\r
- return ;\r
+ @retval TRUE Access to non-SMRAM is restricted.\r
+ @retval FALSE Access to non-SMRAM is not restricted.\r
+**/\r
+BOOLEAN\r
+IsRestrictedMemoryAccess (\r
+ VOID\r
+ )\r
+{\r
+ return mCpuSmmRestrictedMemoryAccess;\r
}\r