/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/
#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3

LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmStaticPageTable;
/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}
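
//
// Illustrative sketch (editor's addition, hypothetical helper not part of this
// driver): the same extended-CPUID pattern can probe other leaf-0x80000001
// capabilities, e.g. the Execute-Disable feature that the #PF handler later
// encodes into entries as IA32_PG_NX. CPUID.80000001H:EDX[20] reports NX support.
//
STATIC
BOOLEAN
IsNxSupportSketch (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    return (BOOLEAN)((RegEdx & BIT20) != 0);
  }

  return FALSE;
}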
/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}
/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry

**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
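
//
// Illustrative sketch (editor's addition, hypothetical; not used by this driver):
// round-trip of the 0-based sub-entry count through the reserved BIT52..BIT60
// field. Storing 3 records four sub-entries, which is how SmmInitPageTable later
// tracks the four PDPT entries under the single PML4 entry.
//
STATIC
VOID
SubEntriesNumRoundTripSketch (
  VOID
  )
{
  UINT64  Entry;

  Entry = 0x000000001234A027ULL;           // arbitrary sample entry value
  SetSubEntriesNum (&Entry, 3);            // 0-based count: 3 means 4 sub-entries
  ASSERT (GetSubEntriesNum (&Entry) == 3);
  //
  // BitFieldWrite64 leaves bits outside BIT52..BIT60 untouched
  //
  ASSERT ((Entry & 0xFFFFFFFFFFFFULL) == 0x000000001234A027ULL);
}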
/**
  Calculate the maximum support address.

  @return the maximum support address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  return PhysicalAddressBits;
}
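
//
// Illustrative sketch (editor's addition, hypothetical helper): the returned bit
// count is typically turned into an address mask. For a 48-bit processor the
// expression below yields 0x0000FFFFFFFFF000, which keeps the page-aligned
// physical-address field of a page table entry.
//
STATIC
UINT64
PhysicalAddressMaskSketch (
  IN UINT8  PhysicalAddressBits
  )
{
  //
  // Bits [PhysicalAddressBits-1 : 12] hold the page frame number
  //
  return (LShiftU64 (1, PhysicalAddressBits) - 1) & ~(UINT64)(EFI_PAGE_SIZE - 1);
}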
/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded  = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded  = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageAddress        = 0;
  PageMapLevel4Entry = PageMap;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT (PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT (PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

          //
          // Fill in a Page Directory Pointer Entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
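
//
// Worked example (editor's sketch, hypothetical helper): reproduces the sizing
// arithmetic at the top of SetStaticPageTable. A PML4 entry spans 39 address
// bits (512 GB) and a PDPT entry spans 30 bits (1 GB), so 36 address bits need
// 1 PML4 entry and 1 << (36 - 30) = 64 PDPT entries, while 40 bits need
// 1 << (40 - 39) = 2 PML4 entries with full 512-entry PDPTs.
//
STATIC
VOID
StaticPageTableSizingSketch (
  IN  UINTN  PhysicalAddressBits,
  OUT UINTN  *Pml4Entries,
  OUT UINTN  *PdpEntries
  )
{
  if (PhysicalAddressBits <= 39) {
    *Pml4Entries = 1;
    *PdpEntries  = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);
  } else {
    *Pml4Entries = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    *PdpEntries  = 512;
  }
}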
/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport    = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
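
//
// Illustrative sketch (editor's addition, hypothetical helper not used by this
// driver): the OffsetLow/OffsetHigh/OffsetUpper split done above for the
// page-fault gate, factored out. An x64 interrupt gate carries the 64-bit
// handler address in three pieces: bits 15:0, 31:16 and 63:32.
//
STATIC
VOID
SetIdtGateOffsetSketch (
  IN OUT IA32_IDT_GATE_DESCRIPTOR  *IdtEntry,
  IN     UINTN                     HandlerAddress
  )
{
  IdtEntry->Bits.OffsetLow   = (UINT16)HandlerAddress;
  IdtEntry->Bits.OffsetHigh  = (UINT16)(HandlerAddress >> 16);
  IdtEntry->Bits.OffsetUpper = (UINT32)(HandlerAddress >> 32);
}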
/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; adding ACC_MAX_BIT makes the
    // returned value larger than that of any entry that was not accessed
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}
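
//
// Illustrative sketch (editor's addition, hypothetical): expected behavior of
// the aging policy above. A recently accessed entry is boosted to 7 and
// reported as 7 + ACC_MAX_BIT so it outranks any entry that was not accessed;
// an idle entry simply decays by one per scan until it reaches 0.
//
STATIC
VOID
AccNumAgingSketch (
  VOID
  )
{
  UINT64  Entry;

  Entry = IA32_PG_A;                              // accessed since last scan
  ASSERT (GetAndUpdateAccNum (&Entry) == (0x7 + ACC_MAX_BIT));
  ASSERT ((Entry & IA32_PG_A) == 0);              // access flag cleared
  ASSERT (GetAccNum (&Entry) == 7);               // record reset to 7

  ASSERT (GetAndUpdateAccNum (&Entry) == 7);      // idle: returns old value...
  ASSERT (GetAccNum (&Entry) == 6);               // ...and decays by one
}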
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted
  into the page pool at the same time.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pdt;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdtIndex;
  UINTN    MinPml4;
  UINTN    MinPdpt;
  UINTN    MinPdt;
  UINT64   MinAcc;
  UINT64   Acc;
  UINT64   SubEntriesNum;
  BOOLEAN  PML4EntryCreated;
  BOOLEAN  PDPTEntryCreated;
  UINT64   *ReleasePageAddress;

  Pml4    = NULL;
  Pdpt    = NULL;
  Pdt     = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EntryCreated = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EntryCreated = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it should be a PDPT entry;
        // we will not check the PML4 entry any more
        //
        PML4EntryCreated = TRUE;
        Pdt = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEntryCreated = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //
              PDPTEntryCreated = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it should be a PD entry;
            // we will find the entry that has the smallest access record value
            //
            PDPTEntryCreated = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEntryCreated) {
          //
          // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
          // it should only have entries pointing to 2 MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EntryCreated) {
      //
      // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
      // it should only have entries pointing to 1 GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte
        // Page Table entry; clear the Page Directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1 GByte page
        // table entry; clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PML4 table has been released before; exit
    //
    break;
  }
}
/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);

  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);

  return RetVal;
}
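
//
// Illustrative sketch (editor's addition, hypothetical helper; this driver only
// returns pages to the pool from ReclaimPages): the pool convention that makes
// AllocPage work. The first bytes of each free page serve as its LIST_ENTRY, so
// a page is returned by linking its own address into mPagePool, and AllocPage
// can simply take ForwardLink as the page address and zero it afterwards.
//
STATIC
VOID
FreePageSketch (
  IN UINT64  PageAddress    // 4 KB-aligned address previously returned by AllocPage
  )
{
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)PageAddress);
}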
/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *Pml4;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit    = 0;
  Pml4      = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform does not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // The entry already exists; this issue may occur when page entries of
      // different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
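
//
// Illustrative sketch (editor's addition, hypothetical; not driver code): how
// the walk above slices a canonical 48-bit linear address into table indices
// with BitFieldRead64. With EndBit = 21 (2 MB pages) the loop visits
// StartBit = 39 (PML4) and 30 (PDPT), then the final read at StartBit = 21
// selects the Page Directory entry.
//
STATIC
VOID
PagingIndicesSketch (
  IN  UINT64  LinearAddress,
  OUT UINT64  *Pml4Index,
  OUT UINT64  *PdptIndex,
  OUT UINT64  *PdIndex,
  OUT UINT64  *PtIndex
  )
{
  *Pml4Index = BitFieldRead64 (LinearAddress, 39, 47);  // bits 47:39
  *PdptIndex = BitFieldRead64 (LinearAddress, 30, 38);  // bits 38:30
  *PdIndex   = BitFieldRead64 (LinearAddress, 21, 29);  // bits 29:21
  *PtIndex   = BitFieldRead64 (LinearAddress, 12, 20);  // bits 20:12
}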
/**
  The Page Fault handler wrapper for SMM use.

  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is
                           processor architecture specific.
  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
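
//
// Illustrative sketch (editor's addition; the layout described is an assumption
// read off the arithmetic above): the stack-guard test relies on each CPU's
// stack block starting at mSmmStackArrayBase + CpuIndex * mSmmStackSize, with
// the page one EFI_PAGE_SIZE above that base serving as the not-present guard
// page. A fault landing inside that page is therefore reported as a stack
// overflow rather than a page protection violation.
//
STATIC
BOOLEAN
IsInStackGuardPageSketch (
  IN UINTN  PFAddress,
  IN UINTN  CpuIndex
  )
{
  UINTN  GuardPageAddress;

  GuardPageAddress = mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize;
  return (BOOLEAN)((PFAddress >= GuardPageAddress) &&
                   (PFAddress < GuardPageAddress + EFI_PAGE_SIZE));
}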
/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  CetEnabled;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as write protected.
  // We need to *write* page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }
}
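
//
// Illustrative sketch (editor's addition, hypothetical helpers): the CR0.WP
// bracket used above, factored out. Ring-0 writes to read-only pages fault
// while CR0.WP is set, so the page tables can only mark themselves
// EFI_MEMORY_RO with WP temporarily cleared (and CET disabled, since shadow
// stacks require WP).
//
STATIC
VOID
DisableWriteProtectionSketch (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
}

STATIC
VOID
EnableWriteProtectionSketch (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}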
/**
  This function reads CR2 register when on-demand paging is enabled.

  @param[out]  *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores CR2 register when on-demand paging is enabled.

  @param[in]  Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}