2 Page Fault (#PF) handler for X64 processors
4 Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
// NOTE(review): this copy of the file is a lossy extraction — many original
// lines (function signatures, braces, declarations) are missing. Comments
// added below describe only what the visible code demonstrates.
// Number of pages preallocated into the free-page pool (see mPagePool use below).
13 #define PAGE_TABLE_PAGES 8
// Bias added to a just-accessed entry's 3-bit access record so it ranks above
// every plain record value (see GetAndUpdateAccNum: returns 0x7 + ACC_MAX_BIT).
14 #define ACC_MAX_BIT BIT3
// Defined in another translation unit; used when computing per-CPU
// shadow-stack guard-page addresses in the page-fault handler.
16 extern UINTN mSmmShadowStackSize
;
// Free-page list consumed by AllocPage() and refilled by page reclaim.
18 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
// TRUE when the processor supports 1-GByte pages; set during page-table init.
19 BOOLEAN m1GPageTableSupport
= FALSE
;
// Cached value of PcdCpuSmmRestrictedMemoryAccess (read during init).
20 BOOLEAN mCpuSmmRestrictedMemoryAccess
;
// Assembly patch label; patched with the 5-level-paging flag via PatchInstructionX86.
21 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded
;
// NOTE(review): the function signature, local declarations, returns and
// closing braces of Is1GPageSupport are missing from this extraction;
// only the fragments below are visible.
42 Check if 1-GByte pages is supported by processor or not.
44 @retval TRUE 1-GByte pages is supported.
45 @retval FALSE 1-GByte pages is not supported.
// Query the highest extended CPUID leaf to confirm leaf 80000001h exists.
56 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
57 if (RegEax
>= 0x80000001) {
58 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
// CPUID.80000001h:EDX bit 26 (Page1GB) reports 1-GByte page support.
59 if ((RegEdx
& BIT26
) != 0) {
// NOTE(review): parts of Is5LevelPagingNeeded (opening doc marker, some call
// arguments, DEBUG macro head, return statement, braces) are missing from
// this extraction; comments annotate only the visible fragments.
68 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
69 the max physical address bits is bigger than 48. Because 4-level paging can support
70 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
71 with max physical address bits <= 48.
73 @retval TRUE 5-level paging enabling is needed.
74 @retval FALSE 5-level paging enabling is not needed.
77 Is5LevelPagingNeeded (
81 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize
;
82 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx
;
83 UINT32 MaxExtendedFunctionId
;
// Determine whether the address-size leaf (80000008h) is available.
85 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &MaxExtendedFunctionId
, NULL
, NULL
, NULL
);
86 if (MaxExtendedFunctionId
>= CPUID_VIR_PHY_ADDRESS_SIZE
) {
87 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE
, &VirPhyAddressSize
.Uint32
, NULL
, NULL
, NULL
);
// Fallback when the leaf is unavailable: assume 36 physical address bits.
89 VirPhyAddressSize
.Bits
.PhysicalAddressBits
= 36;
// Structured extended feature leaf (07h, sub-leaf 0) — ECX carries the
// 5-level-paging (LA57) capability bit read into ExtFeatureEcx.
93 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
,
94 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
,
97 &ExtFeatureEcx
.Uint32
,
102 "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
103 VirPhyAddressSize
.Bits
.PhysicalAddressBits
,
104 ExtFeatureEcx
.Bits
.FiveLevelPage
// 4 * 9 + 12 == 48: four 9-bit table indexes plus a 12-bit page offset —
// the limit of 4-level paging. 5-level paging is needed only beyond that.
107 if ((VirPhyAddressSize
.Bits
.PhysicalAddressBits
> 4 * 9 + 12) &&
108 (ExtFeatureEcx
.Bits
.FiveLevelPage
== 1))
// NOTE(review): the SetSubEntriesNum function name line, VOID return type and
// braces are missing from this extraction; the visible body is complete.
117 Set sub-entries number in entry.
119 @param[in, out] Entry Pointer to entry
120 @param[in] SubEntryNum Sub-entries number based on 0:
121 0 means there is 1 sub-entry under this entry
122 0x1ff means there is 512 sub-entries under this entry
127 IN OUT UINT64
*Entry
,
128 IN UINT64 SubEntryNum
// The 9-bit count lives in the entry's ignored/reserved bits, so the
// hardware page walker never sees it.
132 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
134 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
// NOTE(review): the GetSubEntriesNum signature and braces are missing from
// this extraction; the visible body is the complete logic.
138 Return sub-entries number in entry.
140 @param[in] Entry Pointer to entry
142 @return Sub-entries number based on 0:
143 0 means there is 1 sub-entry under this entry
144 0x1ff means there is 512 sub-entries under this entry
152 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
154 return BitFieldRead64 (*Entry
, 52, 60);
// NOTE(review): declarations (RegEax, Hob), the conditional guarding the HOB
// path vs. the CPUID path, and braces are missing from this extraction —
// TODO confirm against the full source before relying on control flow here.
158 Calculate the maximum support address.
160 @return the maximum support address.
163 CalculateMaximumSupportAddress (
168 UINT8 PhysicalAddressBits
;
172 // Get physical address bits supported.
// Preferred source: the CPU HOB published by earlier boot phases.
174 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
176 PhysicalAddressBits
= ((EFI_HOB_CPU
*)Hob
)->SizeOfMemorySpace
;
// CPUID fallback: leaf 80000008h EAX[7:0] is the physical address width.
178 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
179 if (RegEax
>= 0x80000008) {
180 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
181 PhysicalAddressBits
= (UINT8
)RegEax
;
// Final fallback when leaf 80000008h is unavailable.
183 PhysicalAddressBits
= 36;
187 return PhysicalAddressBits
;
191 Set static page table.
193 @param[in] PageTable Address of page table.
194 @param[in] PhysicalAddressBits The maximum physical address bits supported.
199 IN UINT8 PhysicalAddressBits
203 UINTN NumberOfPml5EntriesNeeded
;
204 UINTN NumberOfPml4EntriesNeeded
;
205 UINTN NumberOfPdpEntriesNeeded
;
206 UINTN IndexOfPml5Entries
;
207 UINTN IndexOfPml4Entries
;
208 UINTN IndexOfPdpEntries
;
209 UINTN IndexOfPageDirectoryEntries
;
210 UINT64
*PageMapLevel5Entry
;
211 UINT64
*PageMapLevel4Entry
;
213 UINT64
*PageDirectoryPointerEntry
;
214 UINT64
*PageDirectory1GEntry
;
215 UINT64
*PageDirectoryEntry
;
218 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
219 // when 5-Level Paging is disabled.
221 ASSERT (PhysicalAddressBits
<= 52);
222 if (!m5LevelPagingNeeded
&& (PhysicalAddressBits
> 48)) {
223 PhysicalAddressBits
= 48;
226 NumberOfPml5EntriesNeeded
= 1;
227 if (PhysicalAddressBits
> 48) {
228 NumberOfPml5EntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 48);
229 PhysicalAddressBits
= 48;
232 NumberOfPml4EntriesNeeded
= 1;
233 if (PhysicalAddressBits
> 39) {
234 NumberOfPml4EntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 39);
235 PhysicalAddressBits
= 39;
238 NumberOfPdpEntriesNeeded
= 1;
239 ASSERT (PhysicalAddressBits
> 30);
240 NumberOfPdpEntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 30);
243 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
245 PageMap
= (VOID
*)PageTable
;
247 PageMapLevel4Entry
= PageMap
;
248 PageMapLevel5Entry
= NULL
;
249 if (m5LevelPagingNeeded
) {
251 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
253 PageMapLevel5Entry
= PageMap
;
258 for ( IndexOfPml5Entries
= 0
259 ; IndexOfPml5Entries
< NumberOfPml5EntriesNeeded
260 ; IndexOfPml5Entries
++, PageMapLevel5Entry
++)
263 // Each PML5 entry points to a page of PML4 entries.
264 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
265 // When 5-Level Paging is disabled, below allocation happens only once.
267 if (m5LevelPagingNeeded
) {
268 PageMapLevel4Entry
= (UINT64
*)((*PageMapLevel5Entry
) & ~mAddressEncMask
& gPhyMask
);
269 if (PageMapLevel4Entry
== NULL
) {
270 PageMapLevel4Entry
= AllocatePageTableMemory (1);
271 ASSERT (PageMapLevel4Entry
!= NULL
);
272 ZeroMem (PageMapLevel4Entry
, EFI_PAGES_TO_SIZE (1));
274 *PageMapLevel5Entry
= (UINT64
)(UINTN
)PageMapLevel4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
278 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< (NumberOfPml5EntriesNeeded
== 1 ? NumberOfPml4EntriesNeeded
: 512); IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
280 // Each PML4 entry points to a page of Page Directory Pointer entries.
282 PageDirectoryPointerEntry
= (UINT64
*)((*PageMapLevel4Entry
) & ~mAddressEncMask
& gPhyMask
);
283 if (PageDirectoryPointerEntry
== NULL
) {
284 PageDirectoryPointerEntry
= AllocatePageTableMemory (1);
285 ASSERT (PageDirectoryPointerEntry
!= NULL
);
286 ZeroMem (PageDirectoryPointerEntry
, EFI_PAGES_TO_SIZE (1));
288 *PageMapLevel4Entry
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
291 if (m1GPageTableSupport
) {
292 PageDirectory1GEntry
= PageDirectoryPointerEntry
;
293 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
294 if ((IndexOfPml4Entries
== 0) && (IndexOfPageDirectoryEntries
< 4)) {
296 // Skip the < 4G entries
302 // Fill in the Page Directory entries
304 *PageDirectory1GEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
307 PageAddress
= BASE_4GB
;
308 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< (NumberOfPml4EntriesNeeded
== 1 ? NumberOfPdpEntriesNeeded
: 512); IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
309 if ((IndexOfPml4Entries
== 0) && (IndexOfPdpEntries
< 4)) {
311 // Skip the < 4G entries
317 // Each Page Directory Pointer entry points to a page of Page Directory entries.
318 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
320 PageDirectoryEntry
= (UINT64
*)((*PageDirectoryPointerEntry
) & ~mAddressEncMask
& gPhyMask
);
321 if (PageDirectoryEntry
== NULL
) {
322 PageDirectoryEntry
= AllocatePageTableMemory (1);
323 ASSERT (PageDirectoryEntry
!= NULL
);
324 ZeroMem (PageDirectoryEntry
, EFI_PAGES_TO_SIZE (1));
327 // Fill in a Page Directory Pointer Entries
329 *PageDirectoryPointerEntry
= (UINT64
)(UINTN
)PageDirectoryEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
332 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
334 // Fill in the Page Directory entries
336 *PageDirectoryEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
345 Create PageTable for SMM use.
347 @return The address of PML4 (to set CR3).
355 EFI_PHYSICAL_ADDRESS Pages
;
357 LIST_ENTRY
*FreePage
;
359 UINTN PageFaultHandlerHookAddress
;
360 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
366 // Initialize spin lock
368 InitializeSpinLock (mPFLock
);
370 mCpuSmmRestrictedMemoryAccess
= PcdGetBool (PcdCpuSmmRestrictedMemoryAccess
);
371 m1GPageTableSupport
= Is1GPageSupport ();
372 m5LevelPagingNeeded
= Is5LevelPagingNeeded ();
373 mPhysicalAddressBits
= CalculateMaximumSupportAddress ();
374 PatchInstructionX86 (gPatch5LevelPagingNeeded
, m5LevelPagingNeeded
, 1);
375 DEBUG ((DEBUG_INFO
, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded
));
376 DEBUG ((DEBUG_INFO
, "1GPageTable Support - %d\n", m1GPageTableSupport
));
377 DEBUG ((DEBUG_INFO
, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess
));
378 DEBUG ((DEBUG_INFO
, "PhysicalAddressBits - %d\n", mPhysicalAddressBits
));
380 // Generate PAE page table for the first 4GB memory space
382 Pages
= Gen4GPageTable (FALSE
);
385 // Set IA32_PG_PMNT bit to mask this entry
387 PTEntry
= (UINT64
*)(UINTN
)Pages
;
388 for (Index
= 0; Index
< 4; Index
++) {
389 PTEntry
[Index
] |= IA32_PG_PMNT
;
393 // Fill Page-Table-Level4 (PML4) entry
395 Pml4Entry
= (UINT64
*)AllocatePageTableMemory (1);
396 ASSERT (Pml4Entry
!= NULL
);
397 *Pml4Entry
= Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
398 ZeroMem (Pml4Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml4Entry
));
401 // Set sub-entries number
403 SetSubEntriesNum (Pml4Entry
, 3);
406 if (m5LevelPagingNeeded
) {
410 Pml5Entry
= (UINT64
*)AllocatePageTableMemory (1);
411 ASSERT (Pml5Entry
!= NULL
);
412 *Pml5Entry
= (UINTN
)Pml4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
413 ZeroMem (Pml5Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml5Entry
));
415 // Set sub-entries number
417 SetSubEntriesNum (Pml5Entry
, 1);
421 if (mCpuSmmRestrictedMemoryAccess
) {
423 // When access to non-SMRAM memory is restricted, create page table
424 // that covers all memory space.
426 SetStaticPageTable ((UINTN
)PTEntry
, mPhysicalAddressBits
);
429 // Add pages to page pool
431 FreePage
= (LIST_ENTRY
*)AllocatePageTableMemory (PAGE_TABLE_PAGES
);
432 ASSERT (FreePage
!= NULL
);
433 for (Index
= 0; Index
< PAGE_TABLE_PAGES
; Index
++) {
434 InsertTailList (&mPagePool
, FreePage
);
435 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
439 if (FeaturePcdGet (PcdCpuSmmProfileEnable
) ||
440 HEAP_GUARD_NONSTOP_MODE
||
441 NULL_DETECTION_NONSTOP_MODE
)
444 // Set own Page Fault entry instead of the default one, because SMM Profile
445 // feature depends on IRET instruction to do Single Step
447 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
448 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*)gcSmiIdtr
.Base
;
449 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
450 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
451 IdtEntry
->Bits
.Reserved_0
= 0;
452 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
453 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
454 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
455 IdtEntry
->Bits
.Reserved_1
= 0;
458 // Register Smm Page Fault Handler
460 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
461 ASSERT_EFI_ERROR (Status
);
465 // Additional SMM IDT initialization for SMM stack guard
467 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
468 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Stack Guard\n"));
469 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
473 // Additional SMM IDT initialization for SMM CET shadow stack
475 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
476 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Shadow Stack\n"));
477 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
478 InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK
, 1);
482 // Return the address of PML4/PML5 (to set CR3)
484 return (UINT32
)(UINTN
)PTEntry
;
// NOTE(review): the SetAccNum name line, the Acc parameter line and braces
// are missing from this extraction; the visible body is the complete logic.
488 Set access record in entry.
490 @param[in, out] Entry Pointer to entry
491 @param[in] Acc Access record value
496 IN OUT UINT64
*Entry
,
// The 3-bit record lives in ignored bits of the paging entry, so storing it
// does not change translation behavior.
501 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
503 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
// NOTE(review): the GetAccNum signature and braces are missing from this
// extraction; the visible body is the complete logic.
507 Return access record in entry.
509 @param[in] Entry Pointer to entry
511 @return Access record value.
520 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
522 return BitFieldRead64 (*Entry
, 9, 11);
// NOTE(review): the GetAndUpdateAccNum signature, the final "return Acc"
// path and braces are missing from this extraction.
526 Return and update the access record in entry.
528 @param[in, out] Entry Pointer to entry
530 @return Access record value.
540 Acc
= GetAccNum (Entry
);
// IA32_PG_A is the hardware Accessed flag — set by the CPU on any access.
541 if ((*Entry
& IA32_PG_A
) != 0) {
543 // If this entry has been accessed, clear access flag in Entry and update access record
544 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
546 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
547 SetAccNum (Entry
, 0x7);
// Returned value is biased (0x7 + ACC_MAX_BIT) so recently-used entries
// always compare greater than un-biased records during reclaim.
548 return (0x7 + ACC_MAX_BIT
);
// Entry not accessed since last check: age the record toward 0.
552 // If the access record is not the smallest value 0, minus 1 and update the access record field
554 SetAccNum (Entry
, Acc
- 1);
562 Reclaim free pages for PageFault handler.
564 Search the whole entries tree to find the leaf entry that has the smallest
565 access record value. Insert the page pointed by this leaf entry into the
566 page pool. And check its upper entries if need to be inserted into the page
590 UINT64 SubEntriesNum
;
593 UINT64
*ReleasePageAddress
;
595 BOOLEAN Enable5LevelPaging
;
597 UINT64 PFAddressPml5Index
;
598 UINT64 PFAddressPml4Index
;
599 UINT64 PFAddressPdptIndex
;
600 UINT64 PFAddressPdtIndex
;
611 ReleasePageAddress
= 0;
612 PFAddress
= AsmReadCr2 ();
613 PFAddressPml5Index
= BitFieldRead64 (PFAddress
, 48, 48 + 8);
614 PFAddressPml4Index
= BitFieldRead64 (PFAddress
, 39, 39 + 8);
615 PFAddressPdptIndex
= BitFieldRead64 (PFAddress
, 30, 30 + 8);
616 PFAddressPdtIndex
= BitFieldRead64 (PFAddress
, 21, 21 + 8);
618 Cr4
.UintN
= AsmReadCr4 ();
619 Enable5LevelPaging
= (BOOLEAN
)(Cr4
.Bits
.LA57
== 1);
620 Pml5
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
622 if (!Enable5LevelPaging
) {
624 // Create one fake PML5 entry for 4-Level Paging
625 // so that the page table parsing logic only handles 5-Level page structure.
627 Pml5Entry
= (UINTN
)Pml5
| IA32_PG_P
;
632 // First, find the leaf entry has the smallest access record value
634 for (Pml5Index
= 0; Pml5Index
< (Enable5LevelPaging
? (EFI_PAGE_SIZE
/ sizeof (*Pml4
)) : 1); Pml5Index
++) {
635 if (((Pml5
[Pml5Index
] & IA32_PG_P
) == 0) || ((Pml5
[Pml5Index
] & IA32_PG_PMNT
) != 0)) {
637 // If the PML5 entry is not present or is masked, skip it
642 Pml4
= (UINT64
*)(UINTN
)(Pml5
[Pml5Index
] & gPhyMask
);
643 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
644 if (((Pml4
[Pml4Index
] & IA32_PG_P
) == 0) || ((Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0)) {
646 // If the PML4 entry is not present or is masked, skip it
651 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & ~mAddressEncMask
& gPhyMask
);
653 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
654 if (((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0) || ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0)) {
656 // If the PDPT entry is not present or is masked, skip it
658 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
660 // If the PDPT entry is masked, we will ignore checking the PML4 entry
668 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
670 // It's not 1-GByte pages entry, it should be a PDPT entry,
671 // we will not check PML4 entry more
674 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & ~mAddressEncMask
& gPhyMask
);
676 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdt
); PdtIndex
++) {
677 if (((Pdt
[PdtIndex
] & IA32_PG_P
) == 0) || ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0)) {
679 // If the PD entry is not present or is masked, skip it
681 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
683 // If the PD entry is masked, we will not check the PDPT entry any more
691 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
693 // It's not 2 MByte page table entry, it should be PD entry
694 // we will find the entry has the smallest access record value
697 if ((PdtIndex
!= PFAddressPdtIndex
) || (PdptIndex
!= PFAddressPdptIndex
) ||
698 (Pml4Index
!= PFAddressPml4Index
) || (Pml5Index
!= PFAddressPml5Index
))
700 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
703 // If the PD entry has the smallest access record value,
704 // save the Page address to be released
711 ReleasePageAddress
= Pdt
+ PdtIndex
;
719 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
720 // it should only has the entries point to 2 MByte Pages
722 if ((PdptIndex
!= PFAddressPdptIndex
) || (Pml4Index
!= PFAddressPml4Index
) ||
723 (Pml5Index
!= PFAddressPml5Index
))
725 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
728 // If the PDPT entry has the smallest access record value,
729 // save the Page address to be released
736 ReleasePageAddress
= Pdpt
+ PdptIndex
;
745 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
746 // it should only has the entries point to 1 GByte Pages
748 if ((Pml4Index
!= PFAddressPml4Index
) || (Pml5Index
!= PFAddressPml5Index
)) {
749 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
752 // If the PML4 entry has the smallest access record value,
753 // save the Page address to be released
760 ReleasePageAddress
= Pml4
+ Pml4Index
;
768 // Make sure one PML4/PDPT/PD entry is selected
770 ASSERT (MinAcc
!= (UINT64
)-1);
773 // Secondly, insert the page pointed by this entry into page pool and clear this entry
775 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& ~mAddressEncMask
& gPhyMask
));
776 *ReleasePageAddress
= 0;
779 // Lastly, check this entry's upper entries if need to be inserted into page pool
783 if (MinPdt
!= (UINTN
)-1) {
785 // If 4 KByte Page Table is released, check the PDPT entry
787 Pml4
= (UINT64
*)(UINTN
)(Pml5
[MinPml5
] & gPhyMask
);
788 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
);
789 SubEntriesNum
= GetSubEntriesNum (Pdpt
+ MinPdpt
);
790 if ((SubEntriesNum
== 0) &&
791 ((MinPdpt
!= PFAddressPdptIndex
) || (MinPml4
!= PFAddressPml4Index
) || (MinPml5
!= PFAddressPml5Index
)))
794 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
795 // clear the Page directory entry
797 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & ~mAddressEncMask
& gPhyMask
));
800 // Go on checking the PML4 table
807 // Update the sub-entries field in PDPT entry and exit
809 SetSubEntriesNum (Pdpt
+ MinPdpt
, (SubEntriesNum
- 1) & 0x1FF);
813 if (MinPdpt
!= (UINTN
)-1) {
815 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
817 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
818 if ((SubEntriesNum
== 0) && ((MinPml4
!= PFAddressPml4Index
) || (MinPml5
!= PFAddressPml5Index
))) {
820 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
821 // clear the Page directory entry
823 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
));
830 // Update the sub-entries field in PML4 entry and exit
832 SetSubEntriesNum (Pml4
+ MinPml4
, (SubEntriesNum
- 1) & 0x1FF);
837 // PML4 table has been released before, exit it
// NOTE(review): the AllocPage signature, the RetVal declaration, the call
// into the reclaim path, the final return and braces are missing from this
// extraction.
844 Allocate free Page for PageFault handler use.
846 @return Page address.
856 if (IsListEmpty (&mPagePool
)) {
858 // If page pool is empty, reclaim the used pages and insert one into page pool
// Pop the first entry; the list node address doubles as the page address.
864 // Get one free page and remove it from page pool
866 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
867 RemoveEntryList (mPagePool
.ForwardLink
);
// Zero the page so stale entries never leak into a fresh page table.
869 // Clean this page and return
871 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
876 Page Fault handler for SMM use.
880 SmiDefaultPFHandler (
885 UINT64
*PageTableTop
;
891 SMM_PAGE_SIZE_TYPE PageSize
;
896 BOOLEAN Enable5LevelPaging
;
900 // Set default SMM page attribute
902 PageSize
= SmmPageSize2M
;
907 PageTableTop
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
908 PFAddress
= AsmReadCr2 ();
910 Cr4
.UintN
= AsmReadCr4 ();
911 Enable5LevelPaging
= (BOOLEAN
)(Cr4
.Bits
.LA57
!= 0);
913 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
915 // If platform not support page table attribute, set default SMM page attribute
917 if (Status
!= EFI_SUCCESS
) {
918 PageSize
= SmmPageSize2M
;
923 if (PageSize
>= MaxSmmPageSizeType
) {
924 PageSize
= SmmPageSize2M
;
927 if (NumOfPages
> 512) {
934 // BIT12 to BIT20 is Page Table index
940 // BIT21 to BIT29 is Page Directory index
943 PageAttribute
|= (UINTN
)IA32_PG_PS
;
946 if (!m1GPageTableSupport
) {
947 DEBUG ((DEBUG_ERROR
, "1-GByte pages is not supported!"));
952 // BIT30 to BIT38 is Page Directory Pointer Table index
955 PageAttribute
|= (UINTN
)IA32_PG_PS
;
962 // If execute-disable is enabled, set NX bit
965 PageAttribute
|= IA32_PG_NX
;
968 for (Index
= 0; Index
< NumOfPages
; Index
++) {
969 PageTable
= PageTableTop
;
971 for (StartBit
= Enable5LevelPaging
? 48 : 39; StartBit
> EndBit
; StartBit
-= 9) {
972 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
973 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
975 // If the entry is not present, allocate one page from page pool for it
977 PageTable
[PTIndex
] = AllocPage () | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
980 // Save the upper entry address
982 UpperEntry
= PageTable
+ PTIndex
;
986 // BIT9 to BIT11 of entry is used to save access record,
987 // initialize value is 7
989 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
990 SetAccNum (PageTable
+ PTIndex
, 7);
991 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & ~mAddressEncMask
& gPhyMask
);
994 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
995 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
997 // Check if the entry has already existed, this issue may occur when the different
998 // size page entries created under the same entry
1000 DEBUG ((DEBUG_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
1001 DEBUG ((DEBUG_ERROR
, "New page table overlapped with old page table!\n"));
1006 // Fill the new entry
1008 PageTable
[PTIndex
] = ((PFAddress
| mAddressEncMask
) & gPhyMask
& ~((1ull << EndBit
) - 1)) |
1009 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
1010 if (UpperEntry
!= NULL
) {
1011 SetSubEntriesNum (UpperEntry
, (GetSubEntriesNum (UpperEntry
) + 1) & 0x1FF);
1015 // Get the next page address if we need to create more page tables
1017 PFAddress
+= (1ull << EndBit
);
1022 The Page Fault handler wrapper for SMM use.
1024 @param InterruptType Defines the type of interrupt or exception that
1025 occurred on the processor.This parameter is processor architecture specific.
1026 @param SystemContext A pointer to the processor context when
1027 the interrupt occurred on the processor.
1032 IN EFI_EXCEPTION_TYPE InterruptType
,
1033 IN EFI_SYSTEM_CONTEXT SystemContext
1037 UINTN GuardPageAddress
;
1038 UINTN ShadowStackGuardPageAddress
;
1041 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
1043 AcquireSpinLock (mPFLock
);
1045 PFAddress
= AsmReadCr2 ();
1047 if (mCpuSmmRestrictedMemoryAccess
&& (PFAddress
>= LShiftU64 (1, (mPhysicalAddressBits
- 1)))) {
1048 DumpCpuContext (InterruptType
, SystemContext
);
1049 DEBUG ((DEBUG_ERROR
, "Do not support address 0x%lx by processor!\n", PFAddress
));
1055 // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
1056 // or SMM page protection violation.
1058 if ((PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
1059 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)))
1061 DumpCpuContext (InterruptType
, SystemContext
);
1062 CpuIndex
= GetCpuIndex ();
1063 GuardPageAddress
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1064 ShadowStackGuardPageAddress
= (mSmmStackArrayBase
+ mSmmStackSize
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1065 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1066 (PFAddress
>= GuardPageAddress
) &&
1067 (PFAddress
< (GuardPageAddress
+ EFI_PAGE_SIZE
)))
1069 DEBUG ((DEBUG_ERROR
, "SMM stack overflow!\n"));
1070 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1071 (mSmmShadowStackSize
> 0) &&
1072 (PFAddress
>= ShadowStackGuardPageAddress
) &&
1073 (PFAddress
< (ShadowStackGuardPageAddress
+ EFI_PAGE_SIZE
)))
1075 DEBUG ((DEBUG_ERROR
, "SMM shadow stack overflow!\n"));
1077 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1078 DEBUG ((DEBUG_ERROR
, "SMM exception at execution (0x%lx)\n", PFAddress
));
1080 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1083 DEBUG ((DEBUG_ERROR
, "SMM exception at access (0x%lx)\n", PFAddress
));
1085 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1089 if (HEAP_GUARD_NONSTOP_MODE
) {
1090 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1100 // If a page fault occurs in non-SMRAM range.
1102 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
1103 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))
1105 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1106 DumpCpuContext (InterruptType
, SystemContext
);
1107 DEBUG ((DEBUG_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
1109 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1116 // If NULL pointer was just accessed
1118 if (((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) &&
1119 (PFAddress
< EFI_PAGE_SIZE
))
1121 DumpCpuContext (InterruptType
, SystemContext
);
1122 DEBUG ((DEBUG_ERROR
, "!!! NULL pointer access !!!\n"));
1124 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1127 if (NULL_DETECTION_NONSTOP_MODE
) {
1128 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1136 if (mCpuSmmRestrictedMemoryAccess
&& IsSmmCommBufferForbiddenAddress (PFAddress
)) {
1137 DumpCpuContext (InterruptType
, SystemContext
);
1138 DEBUG ((DEBUG_ERROR
, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress
));
1140 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1147 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1148 SmmProfilePFHandler (
1149 SystemContext
.SystemContextX64
->Rip
,
1150 SystemContext
.SystemContextX64
->ExceptionData
1153 SmiDefaultPFHandler ();
1157 ReleaseSpinLock (mPFLock
);
1161 This function sets memory attribute for page table.
1164 SetPageTableAttributes (
1172 UINT64
*L1PageTable
;
1173 UINT64
*L2PageTable
;
1174 UINT64
*L3PageTable
;
1175 UINT64
*L4PageTable
;
1176 UINT64
*L5PageTable
;
1177 UINTN PageTableBase
;
1179 BOOLEAN PageTableSplitted
;
1181 BOOLEAN Enable5LevelPaging
;
1185 // Don't mark page table memory as read-only if
1186 // - no restriction on access to non-SMRAM memory; or
1187 // - SMM heap guard feature enabled; or
1188 // BIT2: SMM page guard enabled
1189 // BIT3: SMM pool guard enabled
1190 // - SMM profile feature enabled
1192 if (!mCpuSmmRestrictedMemoryAccess
||
1193 ((PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0) ||
1194 FeaturePcdGet (PcdCpuSmmProfileEnable
))
1197 // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
1200 !(mCpuSmmRestrictedMemoryAccess
&&
1201 (PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0)
1205 // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
1207 ASSERT (!(mCpuSmmRestrictedMemoryAccess
&& FeaturePcdGet (PcdCpuSmmProfileEnable
)));
1211 DEBUG ((DEBUG_INFO
, "SetPageTableAttributes\n"));
1214 // Disable write protection, because we need mark page table to be write protected.
1215 // We need *write* page table memory, to mark itself to be *read only*.
1217 CetEnabled
= ((AsmReadCr4 () & CR4_CET_ENABLE
) != 0) ? TRUE
: FALSE
;
1220 // CET must be disabled if WP is disabled.
1225 AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP
);
1228 DEBUG ((DEBUG_INFO
, "Start...\n"));
1229 PageTableSplitted
= FALSE
;
1232 PageTableBase
= AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64
;
1233 Cr4
.UintN
= AsmReadCr4 ();
1234 Enable5LevelPaging
= (BOOLEAN
)(Cr4
.Bits
.LA57
== 1);
1236 if (Enable5LevelPaging
) {
1237 L5PageTable
= (UINT64
*)PageTableBase
;
1238 SmmSetMemoryAttributesEx (PageTableBase
, Enable5LevelPaging
, (EFI_PHYSICAL_ADDRESS
)PageTableBase
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1239 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1242 for (Index5
= 0; Index5
< (Enable5LevelPaging
? SIZE_4KB
/sizeof (UINT64
) : 1); Index5
++) {
1243 if (Enable5LevelPaging
) {
1244 L4PageTable
= (UINT64
*)(UINTN
)(L5PageTable
[Index5
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1245 if (L4PageTable
== NULL
) {
1249 L4PageTable
= (UINT64
*)PageTableBase
;
1252 SmmSetMemoryAttributesEx (PageTableBase
, Enable5LevelPaging
, (EFI_PHYSICAL_ADDRESS
)(UINTN
)L4PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1253 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1255 for (Index4
= 0; Index4
< SIZE_4KB
/sizeof (UINT64
); Index4
++) {
1256 L3PageTable
= (UINT64
*)(UINTN
)(L4PageTable
[Index4
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1257 if (L3PageTable
== NULL
) {
1261 SmmSetMemoryAttributesEx (PageTableBase
, Enable5LevelPaging
, (EFI_PHYSICAL_ADDRESS
)(UINTN
)L3PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1262 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1264 for (Index3
= 0; Index3
< SIZE_4KB
/sizeof (UINT64
); Index3
++) {
1265 if ((L3PageTable
[Index3
] & IA32_PG_PS
) != 0) {
1270 L2PageTable
= (UINT64
*)(UINTN
)(L3PageTable
[Index3
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1271 if (L2PageTable
== NULL
) {
1275 SmmSetMemoryAttributesEx (PageTableBase
, Enable5LevelPaging
, (EFI_PHYSICAL_ADDRESS
)(UINTN
)L2PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1276 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1278 for (Index2
= 0; Index2
< SIZE_4KB
/sizeof (UINT64
); Index2
++) {
1279 if ((L2PageTable
[Index2
] & IA32_PG_PS
) != 0) {
1284 L1PageTable
= (UINT64
*)(UINTN
)(L2PageTable
[Index2
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1285 if (L1PageTable
== NULL
) {
1289 SmmSetMemoryAttributesEx (PageTableBase
, Enable5LevelPaging
, (EFI_PHYSICAL_ADDRESS
)(UINTN
)L1PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1290 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1295 } while (PageTableSplitted
);
1298 // Enable write protection, after page table updated.
1300 AsmWriteCr0 (AsmReadCr0 () | CR0_WP
);
// NOTE(review): the SaveCr2 signature, parameter list and braces are missing
// from this extraction; the visible body is the complete logic.
1312 This function reads CR2 register when on-demand paging is enabled.
1314 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
// CR2 only needs preserving when on-demand paging may fault and overwrite it.
1321 if (!mCpuSmmRestrictedMemoryAccess
) {
1323 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1325 *Cr2
= AsmReadCr2 ();
// NOTE(review): the RestoreCr2 signature, the AsmWriteCr2 call line and
// braces are missing from this extraction.
1330 This function restores CR2 register when on-demand paging is enabled.
1332 @param[in] Cr2 Value to write into CR2 register.
// Mirror of SaveCr2: restore only when on-demand paging is in use.
1339 if (!mCpuSmmRestrictedMemoryAccess
) {
1341 // On-demand paging is enabled when access to non-SMRAM is not restricted.
// NOTE(review): the return type line, VOID parameter list and braces are
// missing from this extraction; this is a simple accessor for the cached PCD.
1348 Return whether access to non-SMRAM is restricted.
1350 @retval TRUE Access to non-SMRAM is restricted.
1351 @retval FALSE Access to non-SMRAM is not restricted.
1354 IsRestrictedMemoryAccess (
1358 return mCpuSmmRestrictedMemoryAccess
;