/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3
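
//
// Note: the access record kept in bits 9-11 of a page-table entry is only 3 bits wide
// (values 0-7), so ACC_MAX_BIT (BIT3 == 8) is added to the value returned for a freshly
// accessed entry to rank it above every entry that has not been accessed recently.
//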
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmStaticPageTable;

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
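
  //
  // For illustration: SetSubEntriesNum (Entry, 3) records that four sub-entries are
  // currently populated under Entry, since the field stores the count minus one.
  //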

/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
           0 means there is 1 sub-entry under this entry
           0x1ff means there are 512 sub-entries under this entry

**/
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);

/**
  Calculate the maximum support address.

  @return the maximum support address.

**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32                          RegEax;
  UINT8                           PhysicalAddressBits;
  VOID                            *Hob;
  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  return PhysicalAddressBits;
}
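
//
// Note: CPUID leaf 80000008h reports the processor's physical-address width in EAX
// bits 7:0, which is why CalculateMaximumSupportAddress() truncates EAX to a UINT8.
//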

/**
  Set static page table.

  @param[in] PageTable     Address of page table.

**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                                        PageAddress;
  UINTN                                         NumberOfPml4EntriesNeeded;
  UINTN                                         NumberOfPdpEntriesNeeded;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT64                                        *PageMap;
  UINT64                                        *PageMapLevel4Entry;
  UINT64                                        *PageDirectoryPointerEntry;
  UINT64                                        *PageDirectory1GEntry;
  UINT64                                        *PageDirectoryEntry;
  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
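
  //
  // For illustration: with mPhysicalAddressBits == 36 (64 GB) one PML4 entry and 64 PDPT
  // entries are needed, while with 48 address bits 512 PML4 entries are needed, each
  // covered by a full page of 512 PDPT entries.
  //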
  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageAddress = 0;
  PageMapLevel4Entry = PageMap;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }
    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
          //
          // Fill in a Page Directory Pointer Entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;
  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport    = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }
  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);
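
  //
  // Note: the argument 3 uses the zero-based encoding of SetSubEntriesNum(), i.e. the
  // single PML4 entry above currently tracks four sub-entries (the four PDPT entries
  // that map the first 4GB of memory).
  //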
  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  }
  //
  // Add pages to page pool
  //
  FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
  ASSERT (FreePage != NULL);
  for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
    InsertTailList (&mPagePool, FreePage);
    FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
  }
  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }
  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }
  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);

/**
  Return and update the access record in entry.

  @param[in, out] Entry        Pointer to entry

  @return Access record value.

**/
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access
    // record to the initial value 7; adding ACC_MAX_BIT makes it larger than the others.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else if (Acc != 0) {
    //
    // If the access record is not the smallest value 0, decrement it by 1 and update the field.
    //
    SetAccNum (Entry, Acc - 1);
  }

  return Acc;
}
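
//
// In effect, an entry that was just accessed is reset to the highest record (7) and
// reported as 7 + ACC_MAX_BIT, while untouched entries age towards 0, so ReclaimPages()
// below frees an approximately least-recently-used page table page first.
//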

/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries also need to be inserted
  into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       Pml4Index;
  UINT64                       PdptIndex;
  UINT64                       PdtIndex;
  UINT64                       MinPml4;
  UINT64                       MinPdpt;
  UINT64                       MinPdt;
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  MinAcc  = (UINT64)-1;
  MinPml4 = (UINT64)-1;
  MinPdpt = (UINT64)-1;
  MinPdt  = (UINT64)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it should be a PDPT entry;
        // we will not check the PML4 entry any more.
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more.
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page table entry, so it should be a PD entry;
            // we will find the entry that has the smallest access record value.
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (MinAcc > Acc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
          // it should only have entries that point to 2-MByte pages.
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (MinAcc > Acc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINT64)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
      // it should only have entries that point to 1-GByte pages.
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (MinAcc > Acc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINT64)-1;
        MinPdt  = (UINT64)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;
  //
  // Lastly, check whether this entry's upper entries also need to be inserted into the page pool.
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there are no more 4 KByte Page Table entries;
        // clear the Page Directory Pointer entry.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;

        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte Page Table or one Page Directory table has been released; check the PML4 entry.
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory Pointer table if there are no more 1-GByte Page Table
        // entries; clear the PML4 entry.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4 table has been released before; exit.
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64                            RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);

  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);

  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;
  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }
  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }
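
  //
  // Note: the walk below starts at bit 39 (the PML4 index) and steps down 9 bits per
  // paging level until it reaches EndBit (12, 21 or 30 for 4-KByte, 2-MByte or 1-GByte
  // mappings), so the last level visited holds the new leaf entry.
  //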
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }
    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check whether the entry already exists; this issue may occur when different-size
      // page entries are created under the same entry.
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.

**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;
  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
  }
  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
      }
    }
  }
  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
    }
    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
      }
    }
    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
    }
  }
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;
  BOOLEAN               CetEnabled;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }
  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as write protected:
  // we must *write* page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);
    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);
        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

/**
  This function reads CR2 register when on-demand paging is enabled.

  @param[out]  *Cr2  Pointer to variable to hold CR2 register value.

**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}
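
//
// Note: when a static page table is used, no on-demand page-table updates (and thus no
// CR2-consuming page faults) are expected inside SMM, so SaveCr2() and RestoreCr2()
// intentionally do nothing in that configuration.
//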

/**
  This function restores CR2 register when on-demand paging is enabled.

  @param[in]  Cr2  Value to write into CR2 register.

**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}