/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
13 #define PAGE_TABLE_PAGES 8
14 #define ACC_MAX_BIT BIT3
16 extern UINTN mSmmShadowStackSize
;
18 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
19 BOOLEAN m1GPageTableSupport
= FALSE
;
20 BOOLEAN mCpuSmmRestrictedMemoryAccess
;
21 BOOLEAN m5LevelPagingNeeded
;
22 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded
;
43 Check if 1-GByte pages is supported by processor or not.
45 @retval TRUE 1-GByte pages is supported.
46 @retval FALSE 1-GByte pages is not supported.
57 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
58 if (RegEax
>= 0x80000001) {
59 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
60 if ((RegEdx
& BIT26
) != 0) {
68 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
69 the max physical address bits is bigger than 48. Because 4-level paging can support
70 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
71 with max physical address bits <= 48.
73 @retval TRUE 5-level paging enabling is needed.
74 @retval FALSE 5-level paging enabling is not needed.
77 Is5LevelPagingNeeded (
81 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize
;
82 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx
;
83 UINT32 MaxExtendedFunctionId
;
85 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &MaxExtendedFunctionId
, NULL
, NULL
, NULL
);
86 if (MaxExtendedFunctionId
>= CPUID_VIR_PHY_ADDRESS_SIZE
) {
87 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE
, &VirPhyAddressSize
.Uint32
, NULL
, NULL
, NULL
);
89 VirPhyAddressSize
.Bits
.PhysicalAddressBits
= 36;
92 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
,
93 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
,
94 NULL
, NULL
, &ExtFeatureEcx
.Uint32
, NULL
97 DEBUG_INFO
, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
98 VirPhyAddressSize
.Bits
.PhysicalAddressBits
, ExtFeatureEcx
.Bits
.FiveLevelPage
101 if (VirPhyAddressSize
.Bits
.PhysicalAddressBits
> 4 * 9 + 12) {
102 ASSERT (ExtFeatureEcx
.Bits
.FiveLevelPage
== 1);
110 Get page table base address and the depth of the page table.
112 @param[out] Base Page table base address.
113 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
118 OUT BOOLEAN
*FiveLevels OPTIONAL
123 if (mInternalCr3
== 0) {
124 *Base
= AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64
;
125 if (FiveLevels
!= NULL
) {
126 Cr4
.UintN
= AsmReadCr4 ();
127 *FiveLevels
= (BOOLEAN
)(Cr4
.Bits
.LA57
== 1);
132 *Base
= mInternalCr3
;
133 if (FiveLevels
!= NULL
) {
134 *FiveLevels
= m5LevelPagingNeeded
;
139 Set sub-entries number in entry.
141 @param[in, out] Entry Pointer to entry
142 @param[in] SubEntryNum Sub-entries number based on 0:
143 0 means there is 1 sub-entry under this entry
144 0x1ff means there is 512 sub-entries under this entry
149 IN OUT UINT64
*Entry
,
150 IN UINT64 SubEntryNum
154 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
156 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
160 Return sub-entries number in entry.
162 @param[in] Entry Pointer to entry
164 @return Sub-entries number based on 0:
165 0 means there is 1 sub-entry under this entry
166 0x1ff means there is 512 sub-entries under this entry
174 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
176 return BitFieldRead64 (*Entry
, 52, 60);
180 Calculate the maximum support address.
182 @return the maximum support address.
185 CalculateMaximumSupportAddress (
190 UINT8 PhysicalAddressBits
;
194 // Get physical address bits supported.
196 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
198 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
200 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
201 if (RegEax
>= 0x80000008) {
202 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
203 PhysicalAddressBits
= (UINT8
) RegEax
;
205 PhysicalAddressBits
= 36;
208 return PhysicalAddressBits
;
212 Set static page table.
214 @param[in] PageTable Address of page table.
215 @param[in] PhysicalAddressBits The maximum physical address bits supported.
220 IN UINT8 PhysicalAddressBits
224 UINTN NumberOfPml5EntriesNeeded
;
225 UINTN NumberOfPml4EntriesNeeded
;
226 UINTN NumberOfPdpEntriesNeeded
;
227 UINTN IndexOfPml5Entries
;
228 UINTN IndexOfPml4Entries
;
229 UINTN IndexOfPdpEntries
;
230 UINTN IndexOfPageDirectoryEntries
;
231 UINT64
*PageMapLevel5Entry
;
232 UINT64
*PageMapLevel4Entry
;
234 UINT64
*PageDirectoryPointerEntry
;
235 UINT64
*PageDirectory1GEntry
;
236 UINT64
*PageDirectoryEntry
;
239 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
240 // when 5-Level Paging is disabled.
242 ASSERT (PhysicalAddressBits
<= 52);
243 if (!m5LevelPagingNeeded
&& PhysicalAddressBits
> 48) {
244 PhysicalAddressBits
= 48;
247 NumberOfPml5EntriesNeeded
= 1;
248 if (PhysicalAddressBits
> 48) {
249 NumberOfPml5EntriesNeeded
= (UINTN
) LShiftU64 (1, PhysicalAddressBits
- 48);
250 PhysicalAddressBits
= 48;
253 NumberOfPml4EntriesNeeded
= 1;
254 if (PhysicalAddressBits
> 39) {
255 NumberOfPml4EntriesNeeded
= (UINTN
) LShiftU64 (1, PhysicalAddressBits
- 39);
256 PhysicalAddressBits
= 39;
259 NumberOfPdpEntriesNeeded
= 1;
260 ASSERT (PhysicalAddressBits
> 30);
261 NumberOfPdpEntriesNeeded
= (UINTN
) LShiftU64 (1, PhysicalAddressBits
- 30);
264 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
266 PageMap
= (VOID
*) PageTable
;
268 PageMapLevel4Entry
= PageMap
;
269 PageMapLevel5Entry
= NULL
;
270 if (m5LevelPagingNeeded
) {
272 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
274 PageMapLevel5Entry
= PageMap
;
278 for ( IndexOfPml5Entries
= 0
279 ; IndexOfPml5Entries
< NumberOfPml5EntriesNeeded
280 ; IndexOfPml5Entries
++, PageMapLevel5Entry
++) {
282 // Each PML5 entry points to a page of PML4 entires.
283 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
284 // When 5-Level Paging is disabled, below allocation happens only once.
286 if (m5LevelPagingNeeded
) {
287 PageMapLevel4Entry
= (UINT64
*) ((*PageMapLevel5Entry
) & ~mAddressEncMask
& gPhyMask
);
288 if (PageMapLevel4Entry
== NULL
) {
289 PageMapLevel4Entry
= AllocatePageTableMemory (1);
290 ASSERT(PageMapLevel4Entry
!= NULL
);
291 ZeroMem (PageMapLevel4Entry
, EFI_PAGES_TO_SIZE(1));
293 *PageMapLevel5Entry
= (UINT64
)(UINTN
)PageMapLevel4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
297 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< (NumberOfPml5EntriesNeeded
== 1 ? NumberOfPml4EntriesNeeded
: 512); IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
299 // Each PML4 entry points to a page of Page Directory Pointer entries.
301 PageDirectoryPointerEntry
= (UINT64
*) ((*PageMapLevel4Entry
) & ~mAddressEncMask
& gPhyMask
);
302 if (PageDirectoryPointerEntry
== NULL
) {
303 PageDirectoryPointerEntry
= AllocatePageTableMemory (1);
304 ASSERT(PageDirectoryPointerEntry
!= NULL
);
305 ZeroMem (PageDirectoryPointerEntry
, EFI_PAGES_TO_SIZE(1));
307 *PageMapLevel4Entry
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
310 if (m1GPageTableSupport
) {
311 PageDirectory1GEntry
= PageDirectoryPointerEntry
;
312 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
313 if (IndexOfPml4Entries
== 0 && IndexOfPageDirectoryEntries
< 4) {
315 // Skip the < 4G entries
320 // Fill in the Page Directory entries
322 *PageDirectory1GEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
325 PageAddress
= BASE_4GB
;
326 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< (NumberOfPml4EntriesNeeded
== 1 ? NumberOfPdpEntriesNeeded
: 512); IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
327 if (IndexOfPml4Entries
== 0 && IndexOfPdpEntries
< 4) {
329 // Skip the < 4G entries
334 // Each Directory Pointer entries points to a page of Page Directory entires.
335 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
337 PageDirectoryEntry
= (UINT64
*) ((*PageDirectoryPointerEntry
) & ~mAddressEncMask
& gPhyMask
);
338 if (PageDirectoryEntry
== NULL
) {
339 PageDirectoryEntry
= AllocatePageTableMemory (1);
340 ASSERT(PageDirectoryEntry
!= NULL
);
341 ZeroMem (PageDirectoryEntry
, EFI_PAGES_TO_SIZE(1));
344 // Fill in a Page Directory Pointer Entries
346 *PageDirectoryPointerEntry
= (UINT64
)(UINTN
)PageDirectoryEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
349 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
351 // Fill in the Page Directory entries
353 *PageDirectoryEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
362 Create PageTable for SMM use.
364 @return The address of PML4 (to set CR3).
372 EFI_PHYSICAL_ADDRESS Pages
;
374 LIST_ENTRY
*FreePage
;
376 UINTN PageFaultHandlerHookAddress
;
377 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
383 // Initialize spin lock
385 InitializeSpinLock (mPFLock
);
387 mCpuSmmRestrictedMemoryAccess
= PcdGetBool (PcdCpuSmmRestrictedMemoryAccess
);
388 m1GPageTableSupport
= Is1GPageSupport ();
389 m5LevelPagingNeeded
= Is5LevelPagingNeeded ();
390 mPhysicalAddressBits
= CalculateMaximumSupportAddress ();
391 PatchInstructionX86 (gPatch5LevelPagingNeeded
, m5LevelPagingNeeded
, 1);
392 DEBUG ((DEBUG_INFO
, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded
));
393 DEBUG ((DEBUG_INFO
, "1GPageTable Support - %d\n", m1GPageTableSupport
));
394 DEBUG ((DEBUG_INFO
, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess
));
395 DEBUG ((DEBUG_INFO
, "PhysicalAddressBits - %d\n", mPhysicalAddressBits
));
397 // Generate PAE page table for the first 4GB memory space
399 Pages
= Gen4GPageTable (FALSE
);
402 // Set IA32_PG_PMNT bit to mask this entry
404 PTEntry
= (UINT64
*)(UINTN
)Pages
;
405 for (Index
= 0; Index
< 4; Index
++) {
406 PTEntry
[Index
] |= IA32_PG_PMNT
;
410 // Fill Page-Table-Level4 (PML4) entry
412 Pml4Entry
= (UINT64
*)AllocatePageTableMemory (1);
413 ASSERT (Pml4Entry
!= NULL
);
414 *Pml4Entry
= Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
415 ZeroMem (Pml4Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml4Entry
));
418 // Set sub-entries number
420 SetSubEntriesNum (Pml4Entry
, 3);
423 if (m5LevelPagingNeeded
) {
427 Pml5Entry
= (UINT64
*)AllocatePageTableMemory (1);
428 ASSERT (Pml5Entry
!= NULL
);
429 *Pml5Entry
= (UINTN
) Pml4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
430 ZeroMem (Pml5Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml5Entry
));
432 // Set sub-entries number
434 SetSubEntriesNum (Pml5Entry
, 1);
438 if (mCpuSmmRestrictedMemoryAccess
) {
440 // When access to non-SMRAM memory is restricted, create page table
441 // that covers all memory space.
443 SetStaticPageTable ((UINTN
)PTEntry
, mPhysicalAddressBits
);
446 // Add pages to page pool
448 FreePage
= (LIST_ENTRY
*)AllocatePageTableMemory (PAGE_TABLE_PAGES
);
449 ASSERT (FreePage
!= NULL
);
450 for (Index
= 0; Index
< PAGE_TABLE_PAGES
; Index
++) {
451 InsertTailList (&mPagePool
, FreePage
);
452 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
456 if (FeaturePcdGet (PcdCpuSmmProfileEnable
) ||
457 HEAP_GUARD_NONSTOP_MODE
||
458 NULL_DETECTION_NONSTOP_MODE
) {
460 // Set own Page Fault entry instead of the default one, because SMM Profile
461 // feature depends on IRET instruction to do Single Step
463 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
464 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*) gcSmiIdtr
.Base
;
465 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
466 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
467 IdtEntry
->Bits
.Reserved_0
= 0;
468 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
469 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
470 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
471 IdtEntry
->Bits
.Reserved_1
= 0;
474 // Register Smm Page Fault Handler
476 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
477 ASSERT_EFI_ERROR (Status
);
481 // Additional SMM IDT initialization for SMM stack guard
483 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
484 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Stack Guard\n"));
485 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
489 // Additional SMM IDT initialization for SMM CET shadow stack
491 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
492 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Shadow Stack\n"));
493 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
494 InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK
, 1);
498 // Return the address of PML4/PML5 (to set CR3)
500 return (UINT32
)(UINTN
)PTEntry
;
504 Set access record in entry.
506 @param[in, out] Entry Pointer to entry
507 @param[in] Acc Access record value
512 IN OUT UINT64
*Entry
,
517 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
519 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
523 Return access record in entry.
525 @param[in] Entry Pointer to entry
527 @return Access record value.
536 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
538 return BitFieldRead64 (*Entry
, 9, 11);
542 Return and update the access record in entry.
544 @param[in, out] Entry Pointer to entry
546 @return Access record value.
556 Acc
= GetAccNum (Entry
);
557 if ((*Entry
& IA32_PG_A
) != 0) {
559 // If this entry has been accessed, clear access flag in Entry and update access record
560 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
562 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
563 SetAccNum (Entry
, 0x7);
564 return (0x7 + ACC_MAX_BIT
);
568 // If the access record is not the smallest value 0, minus 1 and update the access record field
570 SetAccNum (Entry
, Acc
- 1);
577 Reclaim free pages for PageFault handler.
579 Search the whole entries tree to find the leaf entry that has the smallest
580 access record value. Insert the page pointed by this leaf entry into the
581 page pool. And check its upper entries if need to be inserted into the page
605 UINT64 SubEntriesNum
;
608 UINT64
*ReleasePageAddress
;
610 BOOLEAN Enable5LevelPaging
;
612 UINT64 PFAddressPml5Index
;
613 UINT64 PFAddressPml4Index
;
614 UINT64 PFAddressPdptIndex
;
615 UINT64 PFAddressPdtIndex
;
626 ReleasePageAddress
= 0;
627 PFAddress
= AsmReadCr2 ();
628 PFAddressPml5Index
= BitFieldRead64 (PFAddress
, 48, 48 + 8);
629 PFAddressPml4Index
= BitFieldRead64 (PFAddress
, 39, 39 + 8);
630 PFAddressPdptIndex
= BitFieldRead64 (PFAddress
, 30, 30 + 8);
631 PFAddressPdtIndex
= BitFieldRead64 (PFAddress
, 21, 21 + 8);
633 Cr4
.UintN
= AsmReadCr4 ();
634 Enable5LevelPaging
= (BOOLEAN
) (Cr4
.Bits
.LA57
== 1);
635 Pml5
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
637 if (!Enable5LevelPaging
) {
639 // Create one fake PML5 entry for 4-Level Paging
640 // so that the page table parsing logic only handles 5-Level page structure.
642 Pml5Entry
= (UINTN
) Pml5
| IA32_PG_P
;
647 // First, find the leaf entry has the smallest access record value
649 for (Pml5Index
= 0; Pml5Index
< (Enable5LevelPaging
? (EFI_PAGE_SIZE
/ sizeof (*Pml4
)) : 1); Pml5Index
++) {
650 if ((Pml5
[Pml5Index
] & IA32_PG_P
) == 0 || (Pml5
[Pml5Index
] & IA32_PG_PMNT
) != 0) {
652 // If the PML5 entry is not present or is masked, skip it
656 Pml4
= (UINT64
*)(UINTN
)(Pml5
[Pml5Index
] & gPhyMask
);
657 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
658 if ((Pml4
[Pml4Index
] & IA32_PG_P
) == 0 || (Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0) {
660 // If the PML4 entry is not present or is masked, skip it
664 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & ~mAddressEncMask
& gPhyMask
);
666 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
667 if ((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0 || (Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
669 // If the PDPT entry is not present or is masked, skip it
671 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
673 // If the PDPT entry is masked, we will ignore checking the PML4 entry
679 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
681 // It's not 1-GByte pages entry, it should be a PDPT entry,
682 // we will not check PML4 entry more
685 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & ~mAddressEncMask
& gPhyMask
);
687 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof(*Pdt
); PdtIndex
++) {
688 if ((Pdt
[PdtIndex
] & IA32_PG_P
) == 0 || (Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
690 // If the PD entry is not present or is masked, skip it
692 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
694 // If the PD entry is masked, we will not PDPT entry more
700 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
702 // It's not 2 MByte page table entry, it should be PD entry
703 // we will find the entry has the smallest access record value
706 if (PdtIndex
!= PFAddressPdtIndex
|| PdptIndex
!= PFAddressPdptIndex
||
707 Pml4Index
!= PFAddressPml4Index
|| Pml5Index
!= PFAddressPml5Index
) {
708 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
711 // If the PD entry has the smallest access record value,
712 // save the Page address to be released
719 ReleasePageAddress
= Pdt
+ PdtIndex
;
726 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
727 // it should only has the entries point to 2 MByte Pages
729 if (PdptIndex
!= PFAddressPdptIndex
|| Pml4Index
!= PFAddressPml4Index
||
730 Pml5Index
!= PFAddressPml5Index
) {
731 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
734 // If the PDPT entry has the smallest access record value,
735 // save the Page address to be released
742 ReleasePageAddress
= Pdpt
+ PdptIndex
;
750 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
751 // it should only has the entries point to 1 GByte Pages
753 if (Pml4Index
!= PFAddressPml4Index
|| Pml5Index
!= PFAddressPml5Index
) {
754 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
757 // If the PML4 entry has the smallest access record value,
758 // save the Page address to be released
765 ReleasePageAddress
= Pml4
+ Pml4Index
;
772 // Make sure one PML4/PDPT/PD entry is selected
774 ASSERT (MinAcc
!= (UINT64
)-1);
777 // Secondly, insert the page pointed by this entry into page pool and clear this entry
779 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& ~mAddressEncMask
& gPhyMask
));
780 *ReleasePageAddress
= 0;
783 // Lastly, check this entry's upper entries if need to be inserted into page pool
787 if (MinPdt
!= (UINTN
)-1) {
789 // If 4 KByte Page Table is released, check the PDPT entry
791 Pml4
= (UINT64
*) (UINTN
) (Pml5
[MinPml5
] & gPhyMask
);
792 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
);
793 SubEntriesNum
= GetSubEntriesNum(Pdpt
+ MinPdpt
);
794 if (SubEntriesNum
== 0 &&
795 (MinPdpt
!= PFAddressPdptIndex
|| MinPml4
!= PFAddressPml4Index
|| MinPml5
!= PFAddressPml5Index
)) {
797 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
798 // clear the Page directory entry
800 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & ~mAddressEncMask
& gPhyMask
));
803 // Go on checking the PML4 table
809 // Update the sub-entries filed in PDPT entry and exit
811 SetSubEntriesNum (Pdpt
+ MinPdpt
, (SubEntriesNum
- 1) & 0x1FF);
814 if (MinPdpt
!= (UINTN
)-1) {
816 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
818 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
819 if (SubEntriesNum
== 0 && (MinPml4
!= PFAddressPml4Index
|| MinPml5
!= PFAddressPml5Index
)) {
821 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
822 // clear the Page directory entry
824 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
));
830 // Update the sub-entries filed in PML4 entry and exit
832 SetSubEntriesNum (Pml4
+ MinPml4
, (SubEntriesNum
- 1) & 0x1FF);
836 // PLM4 table has been released before, exit it
843 Allocate free Page for PageFault handler use.
845 @return Page address.
855 if (IsListEmpty (&mPagePool
)) {
857 // If page pool is empty, reclaim the used pages and insert one into page pool
863 // Get one free page and remove it from page pool
865 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
866 RemoveEntryList (mPagePool
.ForwardLink
);
868 // Clean this page and return
870 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
875 Page Fault handler for SMM use.
879 SmiDefaultPFHandler (
884 UINT64
*PageTableTop
;
890 SMM_PAGE_SIZE_TYPE PageSize
;
895 BOOLEAN Enable5LevelPaging
;
899 // Set default SMM page attribute
901 PageSize
= SmmPageSize2M
;
906 PageTableTop
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
907 PFAddress
= AsmReadCr2 ();
909 Cr4
.UintN
= AsmReadCr4 ();
910 Enable5LevelPaging
= (BOOLEAN
) (Cr4
.Bits
.LA57
!= 0);
912 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
914 // If platform not support page table attribute, set default SMM page attribute
916 if (Status
!= EFI_SUCCESS
) {
917 PageSize
= SmmPageSize2M
;
921 if (PageSize
>= MaxSmmPageSizeType
) {
922 PageSize
= SmmPageSize2M
;
924 if (NumOfPages
> 512) {
931 // BIT12 to BIT20 is Page Table index
937 // BIT21 to BIT29 is Page Directory index
940 PageAttribute
|= (UINTN
)IA32_PG_PS
;
943 if (!m1GPageTableSupport
) {
944 DEBUG ((DEBUG_ERROR
, "1-GByte pages is not supported!"));
948 // BIT30 to BIT38 is Page Directory Pointer Table index
951 PageAttribute
|= (UINTN
)IA32_PG_PS
;
958 // If execute-disable is enabled, set NX bit
961 PageAttribute
|= IA32_PG_NX
;
964 for (Index
= 0; Index
< NumOfPages
; Index
++) {
965 PageTable
= PageTableTop
;
967 for (StartBit
= Enable5LevelPaging
? 48 : 39; StartBit
> EndBit
; StartBit
-= 9) {
968 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
969 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
971 // If the entry is not present, allocate one page from page pool for it
973 PageTable
[PTIndex
] = AllocPage () | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
976 // Save the upper entry address
978 UpperEntry
= PageTable
+ PTIndex
;
981 // BIT9 to BIT11 of entry is used to save access record,
982 // initialize value is 7
984 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
985 SetAccNum (PageTable
+ PTIndex
, 7);
986 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & ~mAddressEncMask
& gPhyMask
);
989 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
990 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
992 // Check if the entry has already existed, this issue may occur when the different
993 // size page entries created under the same entry
995 DEBUG ((DEBUG_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
996 DEBUG ((DEBUG_ERROR
, "New page table overlapped with old page table!\n"));
1000 // Fill the new entry
1002 PageTable
[PTIndex
] = ((PFAddress
| mAddressEncMask
) & gPhyMask
& ~((1ull << EndBit
) - 1)) |
1003 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
1004 if (UpperEntry
!= NULL
) {
1005 SetSubEntriesNum (UpperEntry
, (GetSubEntriesNum (UpperEntry
) + 1) & 0x1FF);
1008 // Get the next page address if we need to create more page tables
1010 PFAddress
+= (1ull << EndBit
);
1015 ThePage Fault handler wrapper for SMM use.
1017 @param InterruptType Defines the type of interrupt or exception that
1018 occurred on the processor.This parameter is processor architecture specific.
1019 @param SystemContext A pointer to the processor context when
1020 the interrupt occurred on the processor.
1025 IN EFI_EXCEPTION_TYPE InterruptType
,
1026 IN EFI_SYSTEM_CONTEXT SystemContext
1030 UINTN GuardPageAddress
;
1031 UINTN ShadowStackGuardPageAddress
;
1034 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
1036 AcquireSpinLock (mPFLock
);
1038 PFAddress
= AsmReadCr2 ();
1040 if (mCpuSmmRestrictedMemoryAccess
&& (PFAddress
>= LShiftU64 (1, (mPhysicalAddressBits
- 1)))) {
1041 DumpCpuContext (InterruptType
, SystemContext
);
1042 DEBUG ((DEBUG_ERROR
, "Do not support address 0x%lx by processor!\n", PFAddress
));
1048 // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
1049 // or SMM page protection violation.
1051 if ((PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
1052 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))) {
1053 DumpCpuContext (InterruptType
, SystemContext
);
1054 CpuIndex
= GetCpuIndex ();
1055 GuardPageAddress
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1056 ShadowStackGuardPageAddress
= (mSmmStackArrayBase
+ mSmmStackSize
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1057 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1058 (PFAddress
>= GuardPageAddress
) &&
1059 (PFAddress
< (GuardPageAddress
+ EFI_PAGE_SIZE
))) {
1060 DEBUG ((DEBUG_ERROR
, "SMM stack overflow!\n"));
1061 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1062 (mSmmShadowStackSize
> 0) &&
1063 (PFAddress
>= ShadowStackGuardPageAddress
) &&
1064 (PFAddress
< (ShadowStackGuardPageAddress
+ EFI_PAGE_SIZE
))) {
1065 DEBUG ((DEBUG_ERROR
, "SMM shadow stack overflow!\n"));
1067 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1068 DEBUG ((DEBUG_ERROR
, "SMM exception at execution (0x%lx)\n", PFAddress
));
1070 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1073 DEBUG ((DEBUG_ERROR
, "SMM exception at access (0x%lx)\n", PFAddress
));
1075 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1079 if (HEAP_GUARD_NONSTOP_MODE
) {
1080 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1089 // If a page fault occurs in non-SMRAM range.
1091 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
1092 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
1093 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1094 DumpCpuContext (InterruptType
, SystemContext
);
1095 DEBUG ((DEBUG_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
1097 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1104 // If NULL pointer was just accessed
1106 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0 &&
1107 (PFAddress
< EFI_PAGE_SIZE
)) {
1108 DumpCpuContext (InterruptType
, SystemContext
);
1109 DEBUG ((DEBUG_ERROR
, "!!! NULL pointer access !!!\n"));
1111 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1114 if (NULL_DETECTION_NONSTOP_MODE
) {
1115 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1123 if (mCpuSmmRestrictedMemoryAccess
&& IsSmmCommBufferForbiddenAddress (PFAddress
)) {
1124 DumpCpuContext (InterruptType
, SystemContext
);
1125 DEBUG ((DEBUG_ERROR
, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress
));
1127 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1134 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1135 SmmProfilePFHandler (
1136 SystemContext
.SystemContextX64
->Rip
,
1137 SystemContext
.SystemContextX64
->ExceptionData
1140 SmiDefaultPFHandler ();
1144 ReleaseSpinLock (mPFLock
);
1148 This function sets memory attribute for page table.
1151 SetPageTableAttributes (
1159 UINT64
*L1PageTable
;
1160 UINT64
*L2PageTable
;
1161 UINT64
*L3PageTable
;
1162 UINT64
*L4PageTable
;
1163 UINT64
*L5PageTable
;
1164 UINTN PageTableBase
;
1166 BOOLEAN PageTableSplitted
;
1168 BOOLEAN Enable5LevelPaging
;
1171 // Don't mark page table memory as read-only if
1172 // - no restriction on access to non-SMRAM memory; or
1173 // - SMM heap guard feature enabled; or
1174 // BIT2: SMM page guard enabled
1175 // BIT3: SMM pool guard enabled
1176 // - SMM profile feature enabled
1178 if (!mCpuSmmRestrictedMemoryAccess
||
1179 ((PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0) ||
1180 FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1182 // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
1184 ASSERT (!(mCpuSmmRestrictedMemoryAccess
&&
1185 (PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0));
1188 // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
1190 ASSERT (!(mCpuSmmRestrictedMemoryAccess
&& FeaturePcdGet (PcdCpuSmmProfileEnable
)));
1194 DEBUG ((DEBUG_INFO
, "SetPageTableAttributes\n"));
1197 // Disable write protection, because we need mark page table to be write protected.
1198 // We need *write* page table memory, to mark itself to be *read only*.
1200 CetEnabled
= ((AsmReadCr4() & CR4_CET_ENABLE
) != 0) ? TRUE
: FALSE
;
1203 // CET must be disabled if WP is disabled.
1207 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP
);
1210 DEBUG ((DEBUG_INFO
, "Start...\n"));
1211 PageTableSplitted
= FALSE
;
1214 GetPageTable (&PageTableBase
, &Enable5LevelPaging
);
1216 if (Enable5LevelPaging
) {
1217 L5PageTable
= (UINT64
*)PageTableBase
;
1218 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)PageTableBase
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1219 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1222 for (Index5
= 0; Index5
< (Enable5LevelPaging
? SIZE_4KB
/sizeof(UINT64
) : 1); Index5
++) {
1223 if (Enable5LevelPaging
) {
1224 L4PageTable
= (UINT64
*)(UINTN
)(L5PageTable
[Index5
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1225 if (L4PageTable
== NULL
) {
1229 L4PageTable
= (UINT64
*)PageTableBase
;
1231 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L4PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1232 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1234 for (Index4
= 0; Index4
< SIZE_4KB
/sizeof(UINT64
); Index4
++) {
1235 L3PageTable
= (UINT64
*)(UINTN
)(L4PageTable
[Index4
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1236 if (L3PageTable
== NULL
) {
1240 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L3PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1241 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1243 for (Index3
= 0; Index3
< SIZE_4KB
/sizeof(UINT64
); Index3
++) {
1244 if ((L3PageTable
[Index3
] & IA32_PG_PS
) != 0) {
1248 L2PageTable
= (UINT64
*)(UINTN
)(L3PageTable
[Index3
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1249 if (L2PageTable
== NULL
) {
1253 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L2PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1254 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1256 for (Index2
= 0; Index2
< SIZE_4KB
/sizeof(UINT64
); Index2
++) {
1257 if ((L2PageTable
[Index2
] & IA32_PG_PS
) != 0) {
1261 L1PageTable
= (UINT64
*)(UINTN
)(L2PageTable
[Index2
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1262 if (L1PageTable
== NULL
) {
1265 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L1PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1266 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1271 } while (PageTableSplitted
);
1274 // Enable write protection, after page table updated.
1276 AsmWriteCr0 (AsmReadCr0() | CR0_WP
);
1288 This function reads CR2 register when on-demand paging is enabled.
1290 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1297 if (!mCpuSmmRestrictedMemoryAccess
) {
1299 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1301 *Cr2
= AsmReadCr2 ();
1306 This function restores CR2 register when on-demand paging is enabled.
1308 @param[in] Cr2 Value to write into CR2 register.
1315 if (!mCpuSmmRestrictedMemoryAccess
) {
1317 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1324 Return whether access to non-SMRAM is restricted.
1326 @retval TRUE Access to non-SMRAM is restricted.
1327 @retval FALSE Access to non-SMRAM is not restricted.
1330 IsRestrictedMemoryAccess (
1334 return mCpuSmmRestrictedMemoryAccess
;