/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/
#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3
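//
// PAGE_TABLE_PAGES is the number of 4 KByte pages pre-allocated into the free
// page pool used by the on-demand #PF handler. ACC_MAX_BIT (BIT3) is one above
// the largest value that fits in the 3-bit access record kept in each entry,
// so a just-accessed entry (7 + ACC_MAX_BIT) always ranks above aged ones.
//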
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmRestrictedMemoryAccess;
BOOLEAN                             m5LevelPagingSupport;
X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;
/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}
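//
// Note: BIT26 in CPUID.80000001H:EDX is the Page1GB capability flag; the
// extended-leaf range is probed first (CPUID.80000000H) so the feature leaf is
// only read when the processor actually implements it.
//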
/**
  Check if 5-level paging is supported by processor or not.

  @retval TRUE  5-level paging is supported.
  @retval FALSE 5-level paging is not supported.

**/
BOOLEAN
Is5LevelPagingSupport (
  VOID
  )
{
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &EcxFlags.Uint32,
    NULL
    );
  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
}
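//
// Note: FiveLevelPage corresponds to CPUID.(EAX=07H,ECX=0):ECX[16] (LA57).
// The result is patched into assembly via gPatch5LevelPagingSupport and also
// selects the page table layout built by SetStaticPageTable().
//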
/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}
/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry

**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
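//
// Example of the zero-based bookkeeping: SmmInitPageTable() calls
// SetSubEntriesNum (Pml4Entry, 3) because the PDPT beneath the first PML4
// entry already holds four (3 + 1) entries covering the first 4 GByte, and
// SmiDefaultPFHandler() later bumps the field with
// SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF)
// whenever it links another table under the same upper entry.
//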
/**
  Calculate the maximum support address.

  @return the maximum support address.

**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32         RegEax;
  UINT8          PhysicalAddressBits;
  VOID           *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
}
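//
// Note: the CPU HOB published in PEI already reports SizeOfMemorySpace, so it
// is preferred; otherwise CPUID.80000008H:EAX[7:0] gives the physical address
// width, with 36 bits as the architectural fallback when that leaf is absent.
//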
/**
  Set static page table.

  @param[in] PageTable     Address of page table.

**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                            PageAddress;
  UINTN                             NumberOfPml5EntriesNeeded;
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml5Entries;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel5Entry;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageMap;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageAddress = 0;
  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingSupport) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in a Page Directory Pointer Entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;
  UINT64                            *Pml4Entry;
  UINT64                            *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingSupport          = Is5LevelPagingSupport ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingSupport) {
    //
    // Fill Page-Map-Level5 (PML5) entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
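//
// Note on the two modes set up above: with PcdCpuSmmRestrictedMemoryAccess the
// whole address space is mapped statically and the #PF handler then mainly
// reports violations; otherwise only the first 4 GByte is mapped up front and
// the free pages queued on mPagePool back the on-demand mapping performed by
// SmiDefaultPFHandler() when SMM code touches an address above 4 GByte.
//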
/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}
/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}
/**
  Return and update the access record in entry.

  @param[in, out] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64      *Entry
  )
{
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear access flag in Entry and update access record
    // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
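//
// The access record implements a small pseudo-LRU: every pass over the page
// tables in ReclaimPages() ages entries that were not touched (decrementing
// toward 0) and resets just-accessed entries to 7, returning 7 + ACC_MAX_BIT
// so that they can never look like the eviction candidate in that pass.
//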
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check whether its upper entries need to be inserted into the
  page pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       Pml5Entry;
  UINT64                       *Pml5;
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml5Index;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml5;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;
  IA32_CR4                     Cr4;
  BOOLEAN                      Enable5LevelPaging;
  UINT64                       PFAddress;
  UINT64                       PFAddressPml5Index;
  UINT64                       PFAddressPml4Index;
  UINT64                       PFAddressPdptIndex;
  UINT64                       PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml5 = (UINTN)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, it should be a PDPT entry;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2 MByte page table entry, it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
            // it should only have entries pointing to 2 MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
        // it should only have entries pointing to 1 GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into the
  // page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there is no more 4 KByte Page Table entry;
        // clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory Pointer table if there is no more 1 GByte page entry;
        // clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // PML4 table has been released before; exit
    //
    break;
  }
}
/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64         RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *PageTableTop;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;
  BOOLEAN                           Enable5LevelPaging;
  IA32_CR4                          Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      CpuDeadLoop ();
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when different-size
      // page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
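//
// Walk sketch for the loop above: with 4-level paging StartBit begins at 39
// (PML4 index) and drops by 9 each level (30 = PDPT, 21 = PD, 12 = PT) until
// it reaches EndBit for the chosen page size, allocating any missing
// intermediate table from the page pool; with 5-level paging the walk simply
// starts one level higher, at bit 48 (PML5 index).
//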
/**
  The Page Fault handler wrapper for SMM use.

  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
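//
// Dispatch summary for SmiPFHandler(): faults inside SMRAM are treated as a
// stack-guard hit or a protection violation, faults in non-SMRAM are reported
// (execution outside SMM, NULL-pointer access, forbidden communication
// buffer), and only the remaining benign faults reach SmmProfilePFHandler()
// or SmiDefaultPFHandler() to extend the on-demand page table.
//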
/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINTN                 Index5;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  UINT64                *L5PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;
  BOOLEAN               CetEnabled;
  BOOLEAN               Enable5LevelPaging;
  IA32_CR4              Cr4;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't mark page table memory as read-only if
  //  - no restriction on access to non-SMRAM memory; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself read-only:
  // we must be able to *write* page table memory while marking it *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            //
            // 1G page, skip it
            //
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              //
              // 2M page, skip it
              //
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }
}
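//
// The do/while above repeats because SmmSetMemoryAttributesEx() may have to
// split a large page in order to apply EFI_MEMORY_RO at 4 KByte granularity;
// the split allocates new page-table pages, which themselves must be marked
// read-only, so the walk is re-run until no further split occurs.
//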
/**
  This function reads CR2 register when on-demand paging is enabled.

  @param[out]  *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}
/**
  This function restores CR2 register when on-demand paging is enabled.

  @param[in]  Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}
/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE  Access to non-SMRAM is restricted.
  @retval FALSE Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}