Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent
#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3

LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmStaticPageTable;
BOOLEAN                             m5LevelPagingSupport;
X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.
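  //
  // CPUID leaf 0x80000001, EDX bit 26 (Page1GB) reports 1-GByte page support;
  // leaf 0x80000000 is queried first to confirm that the extended leaf exists.
  //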
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
  Check if 5-level paging is supported by the processor.

  @retval TRUE   5-level paging is supported.
  @retval FALSE  5-level paging is not supported.
Is5LevelPagingSupport (
  VOID
  )
{
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &EcxFlags.Uint32, NULL
    );
  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry;
                               0x1ff means there are 512 sub-entries under this entry
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
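  //
  // Note: bits 52..62 of a paging-structure entry are ignored by the hardware page
  // walker, so this 9-bit child count rides along in the entry without affecting
  // address translation.
  //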
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry;
          0x1ff means there are 512 sub-entries under this entry

  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
  Calculate the maximum supported physical address width.

  @return The number of supported physical address bits.
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT8  PhysicalAddressBits;
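  //
  // Strategy: prefer the CPU HOB's SizeOfMemorySpace; otherwise fall back to
  // CPUID leaf 0x80000008 (EAX bits 7:0 = physical address width), and finally
  // assume 36 bits when even that leaf is unavailable.
  //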
  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
  Set static page table.

  @param[in] PageTable  Address of page table.
  UINTN    NumberOfPml5EntriesNeeded;
  UINTN    NumberOfPml4EntriesNeeded;
  UINTN    NumberOfPdpEntriesNeeded;
  UINTN    IndexOfPml5Entries;
  UINTN    IndexOfPml4Entries;
  UINTN    IndexOfPdpEntries;
  UINTN    IndexOfPageDirectoryEntries;
  UINT64   *PageMapLevel5Entry;
  UINT64   *PageMapLevel4Entry;
  UINT64   *PageDirectoryPointerEntry;
  UINT64   *PageDirectory1GEntry;
  UINT64   *PageDirectoryEntry;
  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }
  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
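  //
  // Example: with mPhysicalAddressBits == 48, the math above yields 1 PML5 entry,
  // 2^(48-39) = 512 PML4 entries, and 2^(39-30) = 512 PDP entries per PML4 page,
  // i.e. a full identity map of the 256-TByte address space.
  //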
  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingSupport) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
            //
            // Fill in a Page Directory Pointer Entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }
          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
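  //
  // Note: the static table maps the whole space with large leaves (1-GByte entries
  // when supported, 2-MByte otherwise); the first 4 GB are skipped above because
  // Gen4GPageTable already provides those mappings.
  //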
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).
  EFI_PHYSICAL_ADDRESS      Pages;
  LIST_ENTRY                *FreePage;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport    = Is1GPageSupport ();
  m5LevelPagingSupport   = Is5LevelPagingSupport ();
  mPhysicalAddressBits   = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }
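  //
  // Note: IA32_PG_PMNT is a software-defined "permanent" flag kept in ignored entry
  // bits; ReclaimPages skips entries carrying it, so these first-4GB mappings are
  // never released back to the page pool.
  //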
  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
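  //
  // The count is zero-based, so 3 means this PML4 entry has four children: the four
  // PDPT entries created by Gen4GPageTable to cover the first 4 GB.
  //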
  if (m5LevelPagingSupport) {
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }
  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }
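  //
  // Note: FreePage is a LIST_ENTRY pointer, so adding EFI_PAGE_SIZE / sizeof (*FreePage)
  // advances it by exactly one 4-KByte page per iteration; each donated page uses its
  // first bytes as the LIST_ENTRY node linking it into the pool.
  //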
  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }
  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }
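  //
  // PTEntry points at SMRAM-allocated page-table memory, which resides below 4 GB,
  // so the UINT32 cast below does not lose address bits.
  //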
  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.
  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset the
    // access record to the initial value 7; ACC_MAX_BIT is added so the returned
    // value is larger than that of any entry not accessed in this pass
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  }
  if (Acc != 0) {
    //
    // If the access record is not the smallest value 0, decrement it and update the field
    //
    SetAccNum (Entry, Acc - 1);
  }
  return Acc;
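  //
  // Net effect: entries touched since the last sweep restart at the maximum record and
  // outrank everything this pass, while untouched entries decay toward 0 and become the
  // preferred victims for ReclaimPages. The scheme is a coarse LRU kept in spare entry bits.
  //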
  Reclaim free pages for PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, then check whether its upper-level entries need to be inserted
  into the page pool as well.
  UINT64   SubEntriesNum;
  UINT64   *ReleasePageAddress;
  BOOLEAN  Enable5LevelPaging;
  MinAcc  = (UINT64)-1;
  MinPml5 = (UINTN)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  ReleasePageAddress = 0;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }
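  //
  // With the fake entry, Pml5 points at a one-element "table" whose single entry holds
  // the real CR3 value, letting the 4-level and 5-level cases share one walk below.
  //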
  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will not check the PML4 entry any more
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // the PML4 entry will not be checked any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it should be a PD entry;
              // find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
              if (Acc < MinAcc) {
                //
                // If the PD entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = PdtIndex;
                ReleasePageAddress = Pdt + PdtIndex;
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
            if (Acc < MinAcc) {
              //
              // If the PDPT entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml5 = Pml5Index;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = (UINTN)-1;
              ReleasePageAddress = Pdpt + PdptIndex;
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
        if (Acc < MinAcc) {
          //
          // If the PML4 entry has the smallest access record value,
          // save the Page address to be released
          //
          MinAcc  = Acc;
          MinPml5 = Pml5Index;
          MinPml4 = Pml4Index;
          MinPdpt = (UINTN)-1;
          MinPdt  = (UINTN)-1;
          ReleasePageAddress = Pml4 + Pml4Index;
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);
  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;
  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool too
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4 = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there are no more 4-KByte Page Table entries;
        // clear the Page Directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte Page Table was released, or a Page Directory table was released;
      // check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there are no more 1-GByte Page Table entries;
        // clear the Page Directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
  Allocate a free page for PageFault handler use.

  @return Page address.
  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);

  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
  Page Fault handler for SMM use.

SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTableTop;
  SMM_PAGE_SIZE_TYPE  PageSize;
  BOOLEAN             Enable5LevelPaging;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();
  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }
  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
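      //
      // Each paging level consumes 9 bits of the linear address: bits 47:39 (PML4),
      // 38:30 (PDPT), 29:21 (PD) and 20:12 (PT); with 5-level paging the walk starts
      // at bits 56:48 (PML5). The descent stops at EndBit, where the leaf is written.
      //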
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
      //
      // Save the upper entry address
      //
      UpperEntry = PageTable + PTIndex;
      //
      // BIT9 to BIT11 of entry is used to save access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }
    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check whether the entry already exists; this issue may occur when
      // different size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
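  //
  // Note: with 2-MByte pages EndBit is 21, so each pass of the loop above maps the
  // next naturally aligned 2-MB region; NumOfPages caps how many leaf entries a
  // single #PF creates.
  //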
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  GuardPageAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
  AcquireSpinLock (mPFLock);
  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
  }
  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
  }
  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      CpuDeadLoop ();
    }
    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
      CpuDeadLoop ();
    }
    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      CpuDeadLoop ();
    }
  }
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
  This function sets the memory attribute for page tables.
SetPageTableAttributes (
  VOID
  )
{
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  UINT64   *L5PageTable;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  Enable5LevelPaging;
  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  //
  // Don't mark page table memory as read-only if any of the following holds:
  //  - no static page table; or
  //  - SMM heap guard feature enabled, i.e.
  //      BIT2: SMM page guard enabled, or
  //      BIT3: SMM pool guard enabled; or
  //  - SMM profile feature enabled.
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }
  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
  //
  // Disable write protection, because we need to mark the page table itself read-only:
  // we must *write* page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
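  //
  // Note: on CET-capable processors CR4.CET requires CR0.WP to be set, so CET has to be
  // turned off before clearing WP and turned back on after WP is restored.
  //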
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }
    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);
        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);
  //
  // Enable write protection after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.

  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.

  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }