Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmStaticPageTable;
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
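      //
      // CPUID.80000001H:EDX[26] is the Page1GB feature flag, so reaching this
      // branch means the processor does support 1-GByte pages.
      //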
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
  Calculate the maximum support address.

  @return the maximum support address.

CalculateMaximumSupportAddress (

  UINT8                             PhysicalAddressBits;
  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
  Set static page table.

  @param[in] PageTable     Address of page table.
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;
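  //
  // One PDPT entry maps 1 GByte (2^30) and one PML4 entry maps 512 GByte (2^39),
  // so an address space of mPhysicalAddressBits bits needs 2^(mPhysicalAddressBits - 30)
  // PDPT entries, which all fit under a single PML4 entry when mPhysicalAddressBits <= 39.
  //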
  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }
    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in the Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).
  EFI_PHYSICAL_ADDRESS              Pages;
  LIST_ENTRY                        *FreePage;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }
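  //
  // Note: ReclaimPages () skips entries whose IA32_PG_PMNT bit is set, so the
  // below-4GB mappings created above are never released back to the page pool.
  //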
  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);
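  //
  // The sub-entries count is zero based, so the value 3 records that this PML4
  // entry currently has 4 child entries: the four PDPT entries that map 0-4GB.
  //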
  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }
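  //
  // Each free 4-KByte page is tracked by a LIST_ENTRY stored at the start of the
  // page itself; stepping FreePage by EFI_PAGE_SIZE / sizeof (*FreePage) advances
  // to the next page in the allocation.
  //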
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }
  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

  IN OUT UINT64                *Entry,

  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
  Return and update the access record in entry.

  @param[in, out] Entry        Pointer to entry

  @return Access record value.
  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access
    // record to the initial value 7; adding ACC_MAX_BIT makes it larger than the others.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else if (Acc != 0) {
    //
    // If the access record is not the smallest value 0, decrement it by 1 and update the access record field
    //
    SetAccNum (Entry, Acc - 1);
  }
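  //
  // A recently accessed entry therefore reports 0x7 + ACC_MAX_BIT, which is larger
  // than any value the 3-bit record can hold, while entries that have not been
  // accessed age down toward 0 and become the preferred victims for ReclaimPages ().
  //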
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper entries also need to be inserted into
  the page pool.
  UINT64                       SubEntriesNum;
  UINT64                       *ReleasePageAddress;

  ReleasePageAddress = 0;
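  //
  // The scan below walks PML4 -> PDPT -> PD, skipping entries that are not present
  // or that are masked with IA32_PG_PMNT, and remembers the leaf entry with the
  // smallest access record value as the page to release.
  //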
  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //

      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it should be a PDPT entry;
        // we will not check the PML4 entry any more
        //
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //

          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it should be a PD entry;
            // we will find the entry that has the smallest access record value
            //
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            //
            // If the PD entry has the smallest access record value,
            // save the Page address to be released
            //
            ReleasePageAddress = Pdt + PdtIndex;
          //
          // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
          // it should only have entries that point to 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          //
          // If the PDPT entry has the smallest access record value,
          // save the Page address to be released
          //
          ReleasePageAddress = Pdpt + PdptIndex;

    //
    // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
    // it should only have entries that point to 1-GByte pages
    //
    Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
    //
    // If the PML4 entry has the smallest access record value,
    // save the Page address to be released
    //
    ReleasePageAddress = Pml4 + Pml4Index;
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;
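  //
  // Clearing the released entry means the next access to that range will fault
  // again, and SmiDefaultPFHandler () will rebuild the mapping on demand with a
  // page taken from the pool.
  //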
  //
  // Lastly, check whether this entry's upper entries also need to be inserted into the page pool
  //
  if (MinPdt != (UINTN)-1) {
    //
    // If a 4-KByte Page Table is released, check the PDPT entry
    //
    Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
    SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
    if (SubEntriesNum == 0) {
      //
      // Release the empty Page Directory table if there was no more 4-KByte Page Table entry;
      // clear the Page Directory entry
      //
      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
      //
      // Go on checking the PML4 table
      //

    //
    // Update the sub-entries field in the PDPT entry and exit
    //
    SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
  if (MinPdpt != (UINTN)-1) {
    //
    // One 2-MByte Page Table is released or a Page Directory table is released; check the PML4 entry
    //
    SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
    if (SubEntriesNum == 0) {
      //
      // Release the empty PML4 table if there was no more 1-GByte Page Table entry;
      // clear the Page Directory entry
      //
      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));

    //
    // Update the sub-entries field in the PML4 entry and exit
    //
    SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);

  //
  // The PML4 table has been released before; exit
  //
  Allocate free Page for PageFault handler use.

  @return Page address.

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);

  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  Page Fault handler for SMM use.

SmiDefaultPFHandler (

  SMM_PAGE_SIZE_TYPE    PageSize;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
  //
  // BIT12 to BIT20 is the Page Table index
  //

  //
  // BIT21 to BIT29 is the Page Directory index
  //
  PageAttribute |= (UINTN)IA32_PG_PS;

  if (!m1GPageTableSupport) {
    DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
  //
  // BIT30 to BIT38 is the Page Directory Pointer Table index
  //
  PageAttribute |= (UINTN)IA32_PG_PS;

  //
  // If execute-disable is enabled, set NX bit
  //
  PageAttribute |= IA32_PG_NX;
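  //
  // The walk below starts at StartBit 39 (the PML4 index field of the faulting
  // address) and moves down one paging level (9 bits) per iteration; EndBit is the
  // lowest index bit of the level that receives the leaf entry (bit 30, 21, or 12
  // for a 1-GByte, 2-MByte, or 4-KByte mapping, per the index comments above).
  //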
  for (Index = 0; Index < NumOfPages; Index++) {
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when different-size
      // page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.

  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext

  UINTN                   GuardPageAddress;
  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
  }
  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      }
    }
  }
  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
    }
    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
    }
  }
  //
  // If a NULL pointer was just accessed
  //
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
      (PFAddress < EFI_PAGE_SIZE)) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
    DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
  }
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
  This function sets the memory attributes for the page table.

SetPageTableAttributes (

  BOOLEAN               PageTableSplitted;
  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
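  //
  // Marking a page-table page as read-only may require SmmSetMemoryAttributesEx ()
  // to split a large page, which allocates new page-table pages that also need to
  // be marked; PageTableSplitted records whether any split happened so the pass
  // below is repeated until no further splits occur.
  //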
  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as write protected.
  // We need to *write* page table memory in order to mark it *read only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);
        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);