2 Page Fault (#PF) handler for X64 processors
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"

// Number of pages pre-allocated into the on-demand page-table page pool.
13 #define PAGE_TABLE_PAGES 8
// Value added to a freshly-touched entry's access record so it outranks all
// decayed records (records live in 3 reserved bits, max value 7).
14 #define ACC_MAX_BIT BIT3

// Size of the per-CPU shadow stack region; defined in another module.
16 extern UINTN mSmmShadowStackSize
;

// Pool of free 4-KByte pages used by the page-fault handler for on-demand
// page-table creation.
18 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
// TRUE when the processor supports 1-GByte pages (set during init).
19 BOOLEAN m1GPageTableSupport
= FALSE
;
// Cached PcdCpuSmmRestrictedMemoryAccess: TRUE means access to non-SMRAM is restricted.
20 BOOLEAN mCpuSmmRestrictedMemoryAccess
;
// TRUE when 5-level paging should be enabled (CPU support + >48 physical address bits).
21 BOOLEAN m5LevelPagingNeeded
;
// Assembly patch label used to flag 5-level paging need to the SMI entry code.
22 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded
;
43 Check if 1-GByte pages is supported by processor or not.
45 @retval TRUE 1-GByte pages is supported.
46 @retval FALSE 1-GByte pages is not supported.
57 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
58 if (RegEax
>= 0x80000001) {
59 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
60 if ((RegEdx
& BIT26
) != 0) {
69 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
70 the max physical address bits is bigger than 48. Because 4-level paging can support
71 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
72 with max physical address bits <= 48.
74 @retval TRUE 5-level paging enabling is needed.
75 @retval FALSE 5-level paging enabling is not needed.
78 Is5LevelPagingNeeded (
82 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize
;
83 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx
;
84 UINT32 MaxExtendedFunctionId
;
86 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &MaxExtendedFunctionId
, NULL
, NULL
, NULL
);
87 if (MaxExtendedFunctionId
>= CPUID_VIR_PHY_ADDRESS_SIZE
) {
88 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE
, &VirPhyAddressSize
.Uint32
, NULL
, NULL
, NULL
);
90 VirPhyAddressSize
.Bits
.PhysicalAddressBits
= 36;
94 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
,
95 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
,
98 &ExtFeatureEcx
.Uint32
,
103 "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
104 VirPhyAddressSize
.Bits
.PhysicalAddressBits
,
105 ExtFeatureEcx
.Bits
.FiveLevelPage
108 if (VirPhyAddressSize
.Bits
.PhysicalAddressBits
> 4 * 9 + 12) {
109 ASSERT (ExtFeatureEcx
.Bits
.FiveLevelPage
== 1);
117 Get page table base address and the depth of the page table.
119 @param[out] Base Page table base address.
120 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
125 OUT BOOLEAN
*FiveLevels OPTIONAL
130 if (mInternalCr3
== 0) {
131 *Base
= AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64
;
132 if (FiveLevels
!= NULL
) {
133 Cr4
.UintN
= AsmReadCr4 ();
134 *FiveLevels
= (BOOLEAN
)(Cr4
.Bits
.LA57
== 1);
140 *Base
= mInternalCr3
;
141 if (FiveLevels
!= NULL
) {
142 *FiveLevels
= m5LevelPagingNeeded
;
147 Set sub-entries number in entry.
149 @param[in, out] Entry Pointer to entry
150 @param[in] SubEntryNum Sub-entries number based on 0:
151 0 means there is 1 sub-entry under this entry
152 0x1ff means there is 512 sub-entries under this entry
157 IN OUT UINT64
*Entry
,
158 IN UINT64 SubEntryNum
162 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
164 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
168 Return sub-entries number in entry.
170 @param[in] Entry Pointer to entry
172 @return Sub-entries number based on 0:
173 0 means there is 1 sub-entry under this entry
174 0x1ff means there is 512 sub-entries under this entry
182 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
184 return BitFieldRead64 (*Entry
, 52, 60);
188 Calculate the maximum support address.
190 @return the maximum support address.
193 CalculateMaximumSupportAddress (
198 UINT8 PhysicalAddressBits
;
202 // Get physical address bits supported.
204 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
206 PhysicalAddressBits
= ((EFI_HOB_CPU
*)Hob
)->SizeOfMemorySpace
;
208 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
209 if (RegEax
>= 0x80000008) {
210 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
211 PhysicalAddressBits
= (UINT8
)RegEax
;
213 PhysicalAddressBits
= 36;
217 return PhysicalAddressBits
;
221 Set static page table.
223 @param[in] PageTable Address of page table.
224 @param[in] PhysicalAddressBits The maximum physical address bits supported.
229 IN UINT8 PhysicalAddressBits
233 UINTN NumberOfPml5EntriesNeeded
;
234 UINTN NumberOfPml4EntriesNeeded
;
235 UINTN NumberOfPdpEntriesNeeded
;
236 UINTN IndexOfPml5Entries
;
237 UINTN IndexOfPml4Entries
;
238 UINTN IndexOfPdpEntries
;
239 UINTN IndexOfPageDirectoryEntries
;
240 UINT64
*PageMapLevel5Entry
;
241 UINT64
*PageMapLevel4Entry
;
243 UINT64
*PageDirectoryPointerEntry
;
244 UINT64
*PageDirectory1GEntry
;
245 UINT64
*PageDirectoryEntry
;
248 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
249 // when 5-Level Paging is disabled.
251 ASSERT (PhysicalAddressBits
<= 52);
252 if (!m5LevelPagingNeeded
&& (PhysicalAddressBits
> 48)) {
253 PhysicalAddressBits
= 48;
256 NumberOfPml5EntriesNeeded
= 1;
257 if (PhysicalAddressBits
> 48) {
258 NumberOfPml5EntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 48);
259 PhysicalAddressBits
= 48;
262 NumberOfPml4EntriesNeeded
= 1;
263 if (PhysicalAddressBits
> 39) {
264 NumberOfPml4EntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 39);
265 PhysicalAddressBits
= 39;
268 NumberOfPdpEntriesNeeded
= 1;
269 ASSERT (PhysicalAddressBits
> 30);
270 NumberOfPdpEntriesNeeded
= (UINTN
)LShiftU64 (1, PhysicalAddressBits
- 30);
273 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
275 PageMap
= (VOID
*)PageTable
;
277 PageMapLevel4Entry
= PageMap
;
278 PageMapLevel5Entry
= NULL
;
279 if (m5LevelPagingNeeded
) {
281 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
283 PageMapLevel5Entry
= PageMap
;
288 for ( IndexOfPml5Entries
= 0
289 ; IndexOfPml5Entries
< NumberOfPml5EntriesNeeded
290 ; IndexOfPml5Entries
++, PageMapLevel5Entry
++)
293 // Each PML5 entry points to a page of PML4 entires.
294 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
295 // When 5-Level Paging is disabled, below allocation happens only once.
297 if (m5LevelPagingNeeded
) {
298 PageMapLevel4Entry
= (UINT64
*)((*PageMapLevel5Entry
) & ~mAddressEncMask
& gPhyMask
);
299 if (PageMapLevel4Entry
== NULL
) {
300 PageMapLevel4Entry
= AllocatePageTableMemory (1);
301 ASSERT (PageMapLevel4Entry
!= NULL
);
302 ZeroMem (PageMapLevel4Entry
, EFI_PAGES_TO_SIZE (1));
304 *PageMapLevel5Entry
= (UINT64
)(UINTN
)PageMapLevel4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
308 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< (NumberOfPml5EntriesNeeded
== 1 ? NumberOfPml4EntriesNeeded
: 512); IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
310 // Each PML4 entry points to a page of Page Directory Pointer entries.
312 PageDirectoryPointerEntry
= (UINT64
*)((*PageMapLevel4Entry
) & ~mAddressEncMask
& gPhyMask
);
313 if (PageDirectoryPointerEntry
== NULL
) {
314 PageDirectoryPointerEntry
= AllocatePageTableMemory (1);
315 ASSERT (PageDirectoryPointerEntry
!= NULL
);
316 ZeroMem (PageDirectoryPointerEntry
, EFI_PAGES_TO_SIZE (1));
318 *PageMapLevel4Entry
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
321 if (m1GPageTableSupport
) {
322 PageDirectory1GEntry
= PageDirectoryPointerEntry
;
323 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
324 if ((IndexOfPml4Entries
== 0) && (IndexOfPageDirectoryEntries
< 4)) {
326 // Skip the < 4G entries
332 // Fill in the Page Directory entries
334 *PageDirectory1GEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
337 PageAddress
= BASE_4GB
;
338 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< (NumberOfPml4EntriesNeeded
== 1 ? NumberOfPdpEntriesNeeded
: 512); IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
339 if ((IndexOfPml4Entries
== 0) && (IndexOfPdpEntries
< 4)) {
341 // Skip the < 4G entries
347 // Each Directory Pointer entries points to a page of Page Directory entires.
348 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
350 PageDirectoryEntry
= (UINT64
*)((*PageDirectoryPointerEntry
) & ~mAddressEncMask
& gPhyMask
);
351 if (PageDirectoryEntry
== NULL
) {
352 PageDirectoryEntry
= AllocatePageTableMemory (1);
353 ASSERT (PageDirectoryEntry
!= NULL
);
354 ZeroMem (PageDirectoryEntry
, EFI_PAGES_TO_SIZE (1));
357 // Fill in a Page Directory Pointer Entries
359 *PageDirectoryPointerEntry
= (UINT64
)(UINTN
)PageDirectoryEntry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
362 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
364 // Fill in the Page Directory entries
366 *PageDirectoryEntry
= PageAddress
| mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
375 Create PageTable for SMM use.
377 @return The address of PML4 (to set CR3).
385 EFI_PHYSICAL_ADDRESS Pages
;
387 LIST_ENTRY
*FreePage
;
389 UINTN PageFaultHandlerHookAddress
;
390 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
396 // Initialize spin lock
398 InitializeSpinLock (mPFLock
);
400 mCpuSmmRestrictedMemoryAccess
= PcdGetBool (PcdCpuSmmRestrictedMemoryAccess
);
401 m1GPageTableSupport
= Is1GPageSupport ();
402 m5LevelPagingNeeded
= Is5LevelPagingNeeded ();
403 mPhysicalAddressBits
= CalculateMaximumSupportAddress ();
404 PatchInstructionX86 (gPatch5LevelPagingNeeded
, m5LevelPagingNeeded
, 1);
405 DEBUG ((DEBUG_INFO
, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded
));
406 DEBUG ((DEBUG_INFO
, "1GPageTable Support - %d\n", m1GPageTableSupport
));
407 DEBUG ((DEBUG_INFO
, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess
));
408 DEBUG ((DEBUG_INFO
, "PhysicalAddressBits - %d\n", mPhysicalAddressBits
));
410 // Generate PAE page table for the first 4GB memory space
412 Pages
= Gen4GPageTable (FALSE
);
415 // Set IA32_PG_PMNT bit to mask this entry
417 PTEntry
= (UINT64
*)(UINTN
)Pages
;
418 for (Index
= 0; Index
< 4; Index
++) {
419 PTEntry
[Index
] |= IA32_PG_PMNT
;
423 // Fill Page-Table-Level4 (PML4) entry
425 Pml4Entry
= (UINT64
*)AllocatePageTableMemory (1);
426 ASSERT (Pml4Entry
!= NULL
);
427 *Pml4Entry
= Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
428 ZeroMem (Pml4Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml4Entry
));
431 // Set sub-entries number
433 SetSubEntriesNum (Pml4Entry
, 3);
436 if (m5LevelPagingNeeded
) {
440 Pml5Entry
= (UINT64
*)AllocatePageTableMemory (1);
441 ASSERT (Pml5Entry
!= NULL
);
442 *Pml5Entry
= (UINTN
)Pml4Entry
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
443 ZeroMem (Pml5Entry
+ 1, EFI_PAGE_SIZE
- sizeof (*Pml5Entry
));
445 // Set sub-entries number
447 SetSubEntriesNum (Pml5Entry
, 1);
451 if (mCpuSmmRestrictedMemoryAccess
) {
453 // When access to non-SMRAM memory is restricted, create page table
454 // that covers all memory space.
456 SetStaticPageTable ((UINTN
)PTEntry
, mPhysicalAddressBits
);
459 // Add pages to page pool
461 FreePage
= (LIST_ENTRY
*)AllocatePageTableMemory (PAGE_TABLE_PAGES
);
462 ASSERT (FreePage
!= NULL
);
463 for (Index
= 0; Index
< PAGE_TABLE_PAGES
; Index
++) {
464 InsertTailList (&mPagePool
, FreePage
);
465 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
469 if (FeaturePcdGet (PcdCpuSmmProfileEnable
) ||
470 HEAP_GUARD_NONSTOP_MODE
||
471 NULL_DETECTION_NONSTOP_MODE
)
474 // Set own Page Fault entry instead of the default one, because SMM Profile
475 // feature depends on IRET instruction to do Single Step
477 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
478 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*)gcSmiIdtr
.Base
;
479 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
480 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
481 IdtEntry
->Bits
.Reserved_0
= 0;
482 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
483 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
484 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
485 IdtEntry
->Bits
.Reserved_1
= 0;
488 // Register Smm Page Fault Handler
490 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
491 ASSERT_EFI_ERROR (Status
);
495 // Additional SMM IDT initialization for SMM stack guard
497 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
498 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Stack Guard\n"));
499 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
503 // Additional SMM IDT initialization for SMM CET shadow stack
505 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
506 DEBUG ((DEBUG_INFO
, "Initialize IDT IST field for SMM Shadow Stack\n"));
507 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT
, 1);
508 InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK
, 1);
512 // Return the address of PML4/PML5 (to set CR3)
514 return (UINT32
)(UINTN
)PTEntry
;
518 Set access record in entry.
520 @param[in, out] Entry Pointer to entry
521 @param[in] Acc Access record value
526 IN OUT UINT64
*Entry
,
531 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
533 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
537 Return access record in entry.
539 @param[in] Entry Pointer to entry
541 @return Access record value.
550 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
552 return BitFieldRead64 (*Entry
, 9, 11);
556 Return and update the access record in entry.
558 @param[in, out] Entry Pointer to entry
560 @return Access record value.
570 Acc
= GetAccNum (Entry
);
571 if ((*Entry
& IA32_PG_A
) != 0) {
573 // If this entry has been accessed, clear access flag in Entry and update access record
574 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
576 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
577 SetAccNum (Entry
, 0x7);
578 return (0x7 + ACC_MAX_BIT
);
582 // If the access record is not the smallest value 0, minus 1 and update the access record field
584 SetAccNum (Entry
, Acc
- 1);
592 Reclaim free pages for PageFault handler.
594 Search the whole entries tree to find the leaf entry that has the smallest
595 access record value. Insert the page pointed by this leaf entry into the
596 page pool. And check its upper entries if need to be inserted into the page
620 UINT64 SubEntriesNum
;
623 UINT64
*ReleasePageAddress
;
625 BOOLEAN Enable5LevelPaging
;
627 UINT64 PFAddressPml5Index
;
628 UINT64 PFAddressPml4Index
;
629 UINT64 PFAddressPdptIndex
;
630 UINT64 PFAddressPdtIndex
;
641 ReleasePageAddress
= 0;
642 PFAddress
= AsmReadCr2 ();
643 PFAddressPml5Index
= BitFieldRead64 (PFAddress
, 48, 48 + 8);
644 PFAddressPml4Index
= BitFieldRead64 (PFAddress
, 39, 39 + 8);
645 PFAddressPdptIndex
= BitFieldRead64 (PFAddress
, 30, 30 + 8);
646 PFAddressPdtIndex
= BitFieldRead64 (PFAddress
, 21, 21 + 8);
648 Cr4
.UintN
= AsmReadCr4 ();
649 Enable5LevelPaging
= (BOOLEAN
)(Cr4
.Bits
.LA57
== 1);
650 Pml5
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
652 if (!Enable5LevelPaging
) {
654 // Create one fake PML5 entry for 4-Level Paging
655 // so that the page table parsing logic only handles 5-Level page structure.
657 Pml5Entry
= (UINTN
)Pml5
| IA32_PG_P
;
662 // First, find the leaf entry has the smallest access record value
664 for (Pml5Index
= 0; Pml5Index
< (Enable5LevelPaging
? (EFI_PAGE_SIZE
/ sizeof (*Pml4
)) : 1); Pml5Index
++) {
665 if (((Pml5
[Pml5Index
] & IA32_PG_P
) == 0) || ((Pml5
[Pml5Index
] & IA32_PG_PMNT
) != 0)) {
667 // If the PML5 entry is not present or is masked, skip it
672 Pml4
= (UINT64
*)(UINTN
)(Pml5
[Pml5Index
] & gPhyMask
);
673 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
674 if (((Pml4
[Pml4Index
] & IA32_PG_P
) == 0) || ((Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0)) {
676 // If the PML4 entry is not present or is masked, skip it
681 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & ~mAddressEncMask
& gPhyMask
);
683 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
684 if (((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0) || ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0)) {
686 // If the PDPT entry is not present or is masked, skip it
688 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
690 // If the PDPT entry is masked, we will ignore checking the PML4 entry
698 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
700 // It's not 1-GByte pages entry, it should be a PDPT entry,
701 // we will not check PML4 entry more
704 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & ~mAddressEncMask
& gPhyMask
);
706 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdt
); PdtIndex
++) {
707 if (((Pdt
[PdtIndex
] & IA32_PG_P
) == 0) || ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0)) {
709 // If the PD entry is not present or is masked, skip it
711 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
713 // If the PD entry is masked, we will not PDPT entry more
721 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
723 // It's not 2 MByte page table entry, it should be PD entry
724 // we will find the entry has the smallest access record value
727 if ((PdtIndex
!= PFAddressPdtIndex
) || (PdptIndex
!= PFAddressPdptIndex
) ||
728 (Pml4Index
!= PFAddressPml4Index
) || (Pml5Index
!= PFAddressPml5Index
))
730 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
733 // If the PD entry has the smallest access record value,
734 // save the Page address to be released
741 ReleasePageAddress
= Pdt
+ PdtIndex
;
749 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
750 // it should only has the entries point to 2 MByte Pages
752 if ((PdptIndex
!= PFAddressPdptIndex
) || (Pml4Index
!= PFAddressPml4Index
) ||
753 (Pml5Index
!= PFAddressPml5Index
))
755 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
758 // If the PDPT entry has the smallest access record value,
759 // save the Page address to be released
766 ReleasePageAddress
= Pdpt
+ PdptIndex
;
775 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
776 // it should only has the entries point to 1 GByte Pages
778 if ((Pml4Index
!= PFAddressPml4Index
) || (Pml5Index
!= PFAddressPml5Index
)) {
779 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
782 // If the PML4 entry has the smallest access record value,
783 // save the Page address to be released
790 ReleasePageAddress
= Pml4
+ Pml4Index
;
798 // Make sure one PML4/PDPT/PD entry is selected
800 ASSERT (MinAcc
!= (UINT64
)-1);
803 // Secondly, insert the page pointed by this entry into page pool and clear this entry
805 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& ~mAddressEncMask
& gPhyMask
));
806 *ReleasePageAddress
= 0;
809 // Lastly, check this entry's upper entries if need to be inserted into page pool
813 if (MinPdt
!= (UINTN
)-1) {
815 // If 4 KByte Page Table is released, check the PDPT entry
817 Pml4
= (UINT64
*)(UINTN
)(Pml5
[MinPml5
] & gPhyMask
);
818 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
);
819 SubEntriesNum
= GetSubEntriesNum (Pdpt
+ MinPdpt
);
820 if ((SubEntriesNum
== 0) &&
821 ((MinPdpt
!= PFAddressPdptIndex
) || (MinPml4
!= PFAddressPml4Index
) || (MinPml5
!= PFAddressPml5Index
)))
824 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
825 // clear the Page directory entry
827 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & ~mAddressEncMask
& gPhyMask
));
830 // Go on checking the PML4 table
837 // Update the sub-entries filed in PDPT entry and exit
839 SetSubEntriesNum (Pdpt
+ MinPdpt
, (SubEntriesNum
- 1) & 0x1FF);
843 if (MinPdpt
!= (UINTN
)-1) {
845 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
847 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
848 if ((SubEntriesNum
== 0) && ((MinPml4
!= PFAddressPml4Index
) || (MinPml5
!= PFAddressPml5Index
))) {
850 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
851 // clear the Page directory entry
853 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & ~mAddressEncMask
& gPhyMask
));
860 // Update the sub-entries filed in PML4 entry and exit
862 SetSubEntriesNum (Pml4
+ MinPml4
, (SubEntriesNum
- 1) & 0x1FF);
867 // PLM4 table has been released before, exit it
874 Allocate free Page for PageFault handler use.
876 @return Page address.
886 if (IsListEmpty (&mPagePool
)) {
888 // If page pool is empty, reclaim the used pages and insert one into page pool
894 // Get one free page and remove it from page pool
896 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
897 RemoveEntryList (mPagePool
.ForwardLink
);
899 // Clean this page and return
901 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
906 Page Fault handler for SMM use.
910 SmiDefaultPFHandler (
915 UINT64
*PageTableTop
;
921 SMM_PAGE_SIZE_TYPE PageSize
;
926 BOOLEAN Enable5LevelPaging
;
930 // Set default SMM page attribute
932 PageSize
= SmmPageSize2M
;
937 PageTableTop
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
938 PFAddress
= AsmReadCr2 ();
940 Cr4
.UintN
= AsmReadCr4 ();
941 Enable5LevelPaging
= (BOOLEAN
)(Cr4
.Bits
.LA57
!= 0);
943 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
945 // If platform not support page table attribute, set default SMM page attribute
947 if (Status
!= EFI_SUCCESS
) {
948 PageSize
= SmmPageSize2M
;
953 if (PageSize
>= MaxSmmPageSizeType
) {
954 PageSize
= SmmPageSize2M
;
957 if (NumOfPages
> 512) {
964 // BIT12 to BIT20 is Page Table index
970 // BIT21 to BIT29 is Page Directory index
973 PageAttribute
|= (UINTN
)IA32_PG_PS
;
976 if (!m1GPageTableSupport
) {
977 DEBUG ((DEBUG_ERROR
, "1-GByte pages is not supported!"));
982 // BIT30 to BIT38 is Page Directory Pointer Table index
985 PageAttribute
|= (UINTN
)IA32_PG_PS
;
992 // If execute-disable is enabled, set NX bit
995 PageAttribute
|= IA32_PG_NX
;
998 for (Index
= 0; Index
< NumOfPages
; Index
++) {
999 PageTable
= PageTableTop
;
1001 for (StartBit
= Enable5LevelPaging
? 48 : 39; StartBit
> EndBit
; StartBit
-= 9) {
1002 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
1003 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
1005 // If the entry is not present, allocate one page from page pool for it
1007 PageTable
[PTIndex
] = AllocPage () | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1010 // Save the upper entry address
1012 UpperEntry
= PageTable
+ PTIndex
;
1016 // BIT9 to BIT11 of entry is used to save access record,
1017 // initialize value is 7
1019 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
1020 SetAccNum (PageTable
+ PTIndex
, 7);
1021 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & ~mAddressEncMask
& gPhyMask
);
1024 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
1025 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
1027 // Check if the entry has already existed, this issue may occur when the different
1028 // size page entries created under the same entry
1030 DEBUG ((DEBUG_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
1031 DEBUG ((DEBUG_ERROR
, "New page table overlapped with old page table!\n"));
1036 // Fill the new entry
1038 PageTable
[PTIndex
] = ((PFAddress
| mAddressEncMask
) & gPhyMask
& ~((1ull << EndBit
) - 1)) |
1039 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
1040 if (UpperEntry
!= NULL
) {
1041 SetSubEntriesNum (UpperEntry
, (GetSubEntriesNum (UpperEntry
) + 1) & 0x1FF);
1045 // Get the next page address if we need to create more page tables
1047 PFAddress
+= (1ull << EndBit
);
1052 ThePage Fault handler wrapper for SMM use.
1054 @param InterruptType Defines the type of interrupt or exception that
1055 occurred on the processor.This parameter is processor architecture specific.
1056 @param SystemContext A pointer to the processor context when
1057 the interrupt occurred on the processor.
1062 IN EFI_EXCEPTION_TYPE InterruptType
,
1063 IN EFI_SYSTEM_CONTEXT SystemContext
1067 UINTN GuardPageAddress
;
1068 UINTN ShadowStackGuardPageAddress
;
1071 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
1073 AcquireSpinLock (mPFLock
);
1075 PFAddress
= AsmReadCr2 ();
1077 if (mCpuSmmRestrictedMemoryAccess
&& (PFAddress
>= LShiftU64 (1, (mPhysicalAddressBits
- 1)))) {
1078 DumpCpuContext (InterruptType
, SystemContext
);
1079 DEBUG ((DEBUG_ERROR
, "Do not support address 0x%lx by processor!\n", PFAddress
));
1085 // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
1086 // or SMM page protection violation.
1088 if ((PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
1089 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)))
1091 DumpCpuContext (InterruptType
, SystemContext
);
1092 CpuIndex
= GetCpuIndex ();
1093 GuardPageAddress
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1094 ShadowStackGuardPageAddress
= (mSmmStackArrayBase
+ mSmmStackSize
+ EFI_PAGE_SIZE
+ CpuIndex
* (mSmmStackSize
+ mSmmShadowStackSize
));
1095 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1096 (PFAddress
>= GuardPageAddress
) &&
1097 (PFAddress
< (GuardPageAddress
+ EFI_PAGE_SIZE
)))
1099 DEBUG ((DEBUG_ERROR
, "SMM stack overflow!\n"));
1100 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
1101 (mSmmShadowStackSize
> 0) &&
1102 (PFAddress
>= ShadowStackGuardPageAddress
) &&
1103 (PFAddress
< (ShadowStackGuardPageAddress
+ EFI_PAGE_SIZE
)))
1105 DEBUG ((DEBUG_ERROR
, "SMM shadow stack overflow!\n"));
1107 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1108 DEBUG ((DEBUG_ERROR
, "SMM exception at execution (0x%lx)\n", PFAddress
));
1110 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1113 DEBUG ((DEBUG_ERROR
, "SMM exception at access (0x%lx)\n", PFAddress
));
1115 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1119 if (HEAP_GUARD_NONSTOP_MODE
) {
1120 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1130 // If a page fault occurs in non-SMRAM range.
1132 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
1133 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))
1135 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
1136 DumpCpuContext (InterruptType
, SystemContext
);
1137 DEBUG ((DEBUG_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
1139 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
1146 // If NULL pointer was just accessed
1148 if (((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) &&
1149 (PFAddress
< EFI_PAGE_SIZE
))
1151 DumpCpuContext (InterruptType
, SystemContext
);
1152 DEBUG ((DEBUG_ERROR
, "!!! NULL pointer access !!!\n"));
1154 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1157 if (NULL_DETECTION_NONSTOP_MODE
) {
1158 GuardPagePFHandler (SystemContext
.SystemContextX64
->ExceptionData
);
1166 if (mCpuSmmRestrictedMemoryAccess
&& IsSmmCommBufferForbiddenAddress (PFAddress
)) {
1167 DumpCpuContext (InterruptType
, SystemContext
);
1168 DEBUG ((DEBUG_ERROR
, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress
));
1170 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
1177 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1178 SmmProfilePFHandler (
1179 SystemContext
.SystemContextX64
->Rip
,
1180 SystemContext
.SystemContextX64
->ExceptionData
1183 SmiDefaultPFHandler ();
1187 ReleaseSpinLock (mPFLock
);
1191 This function sets memory attribute for page table.
1194 SetPageTableAttributes (
1202 UINT64
*L1PageTable
;
1203 UINT64
*L2PageTable
;
1204 UINT64
*L3PageTable
;
1205 UINT64
*L4PageTable
;
1206 UINT64
*L5PageTable
;
1207 UINTN PageTableBase
;
1209 BOOLEAN PageTableSplitted
;
1211 BOOLEAN Enable5LevelPaging
;
1214 // Don't mark page table memory as read-only if
1215 // - no restriction on access to non-SMRAM memory; or
1216 // - SMM heap guard feature enabled; or
1217 // BIT2: SMM page guard enabled
1218 // BIT3: SMM pool guard enabled
1219 // - SMM profile feature enabled
1221 if (!mCpuSmmRestrictedMemoryAccess
||
1222 ((PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0) ||
1223 FeaturePcdGet (PcdCpuSmmProfileEnable
))
1226 // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
1229 !(mCpuSmmRestrictedMemoryAccess
&&
1230 (PcdGet8 (PcdHeapGuardPropertyMask
) & (BIT3
| BIT2
)) != 0)
1234 // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
1236 ASSERT (!(mCpuSmmRestrictedMemoryAccess
&& FeaturePcdGet (PcdCpuSmmProfileEnable
)));
1240 DEBUG ((DEBUG_INFO
, "SetPageTableAttributes\n"));
1243 // Disable write protection, because we need mark page table to be write protected.
1244 // We need *write* page table memory, to mark itself to be *read only*.
1246 CetEnabled
= ((AsmReadCr4 () & CR4_CET_ENABLE
) != 0) ? TRUE
: FALSE
;
1249 // CET must be disabled if WP is disabled.
1254 AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP
);
1257 DEBUG ((DEBUG_INFO
, "Start...\n"));
1258 PageTableSplitted
= FALSE
;
1261 GetPageTable (&PageTableBase
, &Enable5LevelPaging
);
1263 if (Enable5LevelPaging
) {
1264 L5PageTable
= (UINT64
*)PageTableBase
;
1265 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)PageTableBase
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1266 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1269 for (Index5
= 0; Index5
< (Enable5LevelPaging
? SIZE_4KB
/sizeof (UINT64
) : 1); Index5
++) {
1270 if (Enable5LevelPaging
) {
1271 L4PageTable
= (UINT64
*)(UINTN
)(L5PageTable
[Index5
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1272 if (L4PageTable
== NULL
) {
1276 L4PageTable
= (UINT64
*)PageTableBase
;
1279 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L4PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1280 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1282 for (Index4
= 0; Index4
< SIZE_4KB
/sizeof (UINT64
); Index4
++) {
1283 L3PageTable
= (UINT64
*)(UINTN
)(L4PageTable
[Index4
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1284 if (L3PageTable
== NULL
) {
1288 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L3PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1289 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1291 for (Index3
= 0; Index3
< SIZE_4KB
/sizeof (UINT64
); Index3
++) {
1292 if ((L3PageTable
[Index3
] & IA32_PG_PS
) != 0) {
1297 L2PageTable
= (UINT64
*)(UINTN
)(L3PageTable
[Index3
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1298 if (L2PageTable
== NULL
) {
1302 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L2PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1303 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1305 for (Index2
= 0; Index2
< SIZE_4KB
/sizeof (UINT64
); Index2
++) {
1306 if ((L2PageTable
[Index2
] & IA32_PG_PS
) != 0) {
1311 L1PageTable
= (UINT64
*)(UINTN
)(L2PageTable
[Index2
] & ~mAddressEncMask
& PAGING_4K_ADDRESS_MASK_64
);
1312 if (L1PageTable
== NULL
) {
1316 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L1PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
1317 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
1322 } while (PageTableSplitted
);
1325 // Enable write protection, after page table updated.
1327 AsmWriteCr0 (AsmReadCr0 () | CR0_WP
);
1339 This function reads CR2 register when on-demand paging is enabled.
1341 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1348 if (!mCpuSmmRestrictedMemoryAccess
) {
1350 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1352 *Cr2
= AsmReadCr2 ();
1357 This function restores CR2 register when on-demand paging is enabled.
1359 @param[in] Cr2 Value to write into CR2 register.
1366 if (!mCpuSmmRestrictedMemoryAccess
) {
1368 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1375 Return whether access to non-SMRAM is restricted.
1377 @retval TRUE Access to non-SMRAM is restricted.
1378 @retval FALSE Access to non-SMRAM is not restricted.
1381 IsRestrictedMemoryAccess (
1385 return mCpuSmmRestrictedMemoryAccess
;