2 Page Fault (#PF) handler for X64 processors
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
// NOTE(review): this chunk is a garbled extraction; the leading integers on
// each statement are the original file's line numbers fused into the text,
// and several original lines are missing. Code below is left byte-identical.
15 #include "PiSmmCpuDxeSmm.h"
// Number of 4-KByte pages pre-allocated into the free-page pool used by the
// SMM page-fault handler to grow the page table on demand.
17 #define PAGE_TABLE_PAGES 8
// Upper bound of the 3-bit access-record counter kept in page-table entries
// (record lives in entry bits 9..11 — see SetAccNum/GetAccNum below).
18 #define ACC_MAX_BIT BIT3
// Free-page pool (doubly linked list) consumed by AllocPage() and refilled
// by ReclaimPages().
19 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
// TRUE when the processor supports 1-GByte pages (set in SmmInitPageTable).
20 BOOLEAN m1GPageTableSupport
= FALSE
;
// Maximum physical-address width used when building the static page table.
21 UINT8 mPhysicalAddressBits
;
// Cached PcdCpuSmmStaticPageTable: TRUE = build full static page table,
// FALSE = build page tables on demand from the page pool.
22 BOOLEAN mCpuSmmStaticPageTable
;
// NOTE(review): function signature and closing lines of Is1GPageSupport are
// missing from this extraction; only the doc text and body fragment remain.
25 Check if 1-GByte pages is supported by processor or not.
27 @retval TRUE 1-GByte pages is supported.
28 @retval FALSE 1-GByte pages is not supported.
// Query the highest supported extended CPUID leaf (returned in EAX).
39 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
40 if (RegEax
>= 0x80000001) {
// CPUID.80000001H:EDX[26] is the Page1GB feature flag (Intel SDM).
41 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
42 if ((RegEdx
& BIT26
) != 0) {
// Stores a 9-bit count of in-use child entries inside a page-table entry,
// using the entry's reserved/ignored bits so the CPU walker ignores it.
50 Set sub-entries number in entry.
52 @param[in, out] Entry Pointer to entry
53 @param[in] SubEntryNum Sub-entries number based on 0:
54 0 means there is 1 sub-entry under this entry
55 0x1ff means there is 512 sub-entries under this entry
65 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
67 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
// Reads back the 9-bit sub-entry count written by SetSubEntriesNum.
71 Return sub-entries number in entry.
73 @param[in] Entry Pointer to entry
75 @return Sub-entries number based on 0:
76 0 means there is 1 sub-entry under this entry
77 0x1ff means there is 512 sub-entries under this entry
85 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
87 return BitFieldRead64 (*Entry
, 52, 60);
91 Calculate the maximum support address.
93 @return the maximum support address.
96 CalculateMaximumSupportAddress (
101 UINT8 PhysicalAddressBits
;
105 // Get physical address bits supported.
// Prefer the CPU HOB's SizeOfMemorySpace when a HOB is present.
// NOTE(review): the "if (Hob != NULL)" guard line appears to be missing
// from this extraction — confirm against the original file.
107 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
109 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
// Otherwise fall back to CPUID: leaf 0x80000008 EAX[7:0] reports the
// physical address width when the leaf is supported.
111 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
112 if (RegEax
>= 0x80000008) {
113 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
114 PhysicalAddressBits
= (UINT8
) RegEax
;
// Else branch (header line missing here): assume the architectural
// minimum of 36 physical address bits.
116 PhysicalAddressBits
= 36;
121 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
123 ASSERT (PhysicalAddressBits
<= 52);
// Cap at 48 bits — the static page table maps at most the 48-bit
// linear-address space.
124 if (PhysicalAddressBits
> 48) {
125 PhysicalAddressBits
= 48;
127 return PhysicalAddressBits
;
// Builds a full (static) identity-mapped page table covering the entire
// supported physical address space, using 1-GByte pages when available and
// 2-MByte pages otherwise. The first 4 GB entries are skipped because they
// were already populated by Gen4GPageTable.
// NOTE(review): several original lines (signature, braces, else headers,
// "continue" statements) are missing from this extraction.
131 Set static page table.
133 @param[in] PageTable Address of page table.
141 UINTN NumberOfPml4EntriesNeeded
;
142 UINTN NumberOfPdpEntriesNeeded
;
143 UINTN IndexOfPml4Entries
;
144 UINTN IndexOfPdpEntries
;
145 UINTN IndexOfPageDirectoryEntries
;
146 UINT64
*PageMapLevel4Entry
;
148 UINT64
*PageDirectoryPointerEntry
;
149 UINT64
*PageDirectory1GEntry
;
150 UINT64
*PageDirectoryEntry
;
// Address space <= 2^39: a single PML4 entry suffices; compute the number
// of PDPT entries needed. Otherwise use full 512-entry PDPTs.
152 if (mPhysicalAddressBits
<= 39 ) {
153 NumberOfPml4EntriesNeeded
= 1;
154 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (mPhysicalAddressBits
- 30));
156 NumberOfPml4EntriesNeeded
= (UINT32
)LShiftU64 (1, (mPhysicalAddressBits
- 39));
157 NumberOfPdpEntriesNeeded
= 512;
161 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
163 PageMap
= (VOID
*) PageTable
;
165 PageMapLevel4Entry
= PageMap
;
167 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< NumberOfPml4EntriesNeeded
; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
169 // Each PML4 entry points to a page of Page Directory Pointer entries.
// Reuse an existing PDPT page if the PML4 entry is already populated;
// otherwise allocate and zero a fresh page and link it in.
171 PageDirectoryPointerEntry
= (UINT64
*) ((*PageMapLevel4Entry
) & gPhyMask
);
172 if (PageDirectoryPointerEntry
== NULL
) {
173 PageDirectoryPointerEntry
= AllocatePageTableMemory (1);
174 ASSERT(PageDirectoryPointerEntry
!= NULL
);
175 ZeroMem (PageDirectoryPointerEntry
, EFI_PAGES_TO_SIZE(1));
177 *PageMapLevel4Entry
= ((UINTN
)PageDirectoryPointerEntry
& gPhyMask
) | PAGE_ATTRIBUTE_BITS
;
// 1-GByte-page path: fill 512 PDPT entries directly as 1 GB leaf pages.
180 if (m1GPageTableSupport
) {
181 PageDirectory1GEntry
= PageDirectoryPointerEntry
;
182 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
183 if (IndexOfPml4Entries
== 0 && IndexOfPageDirectoryEntries
< 4) {
185 // Skip the < 4G entries
190 // Fill in the Page Directory entries
192 *PageDirectory1GEntry
= (PageAddress
& gPhyMask
) | IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
// 2-MByte-page path (else branch; header line missing in extraction):
// restart mapping at 4 GB and build PD pages under each PDPT entry.
195 PageAddress
= BASE_4GB
;
196 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
197 if (IndexOfPml4Entries
== 0 && IndexOfPdpEntries
< 4) {
199 // Skip the < 4G entries
204 // Each Directory Pointer entry points to a page of Page Directory entries.
205 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
207 PageDirectoryEntry
= (UINT64
*) ((*PageDirectoryPointerEntry
) & gPhyMask
);
208 if (PageDirectoryEntry
== NULL
) {
209 PageDirectoryEntry
= AllocatePageTableMemory (1);
210 ASSERT(PageDirectoryEntry
!= NULL
);
211 ZeroMem (PageDirectoryEntry
, EFI_PAGES_TO_SIZE(1));
214 // Fill in a Page Directory Pointer Entries
216 *PageDirectoryPointerEntry
= (UINT64
)(UINTN
)PageDirectoryEntry
| PAGE_ATTRIBUTE_BITS
;
219 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
221 // Fill in the Page Directory entries
223 *PageDirectoryEntry
= (UINT64
)PageAddress
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
// Entry point that builds the SMM page table: creates the <4GB mapping,
// optionally extends it to a full static map, seeds the free-page pool,
// and installs the SMM page-fault handler (or the SMM Profile IDT hook).
231 Create PageTable for SMM use.
233 @return The address of PML4 (to set CR3).
241 EFI_PHYSICAL_ADDRESS Pages
;
243 LIST_ENTRY
*FreePage
;
245 UINTN PageFaultHandlerHookAddress
;
246 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
250 // Initialize spin lock
252 InitializeSpinLock (mPFLock
);
// Cache feature configuration in module globals.
254 mCpuSmmStaticPageTable
= PcdGetBool (PcdCpuSmmStaticPageTable
);
255 m1GPageTableSupport
= Is1GPageSupport ();
256 DEBUG ((DEBUG_INFO
, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport
));
257 DEBUG ((DEBUG_INFO
, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable
));
259 mPhysicalAddressBits
= CalculateMaximumSupportAddress ();
260 DEBUG ((DEBUG_INFO
, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits
));
262 // Generate PAE page table for the first 4GB memory space
264 Pages
= Gen4GPageTable (FALSE
);
267 // Set IA32_PG_PMNT bit to mask this entry
// PMNT ("permanent") marks the four <4GB PDPT entries so the reclaim
// logic in ReclaimPages never frees them.
269 PTEntry
= (UINT64
*)(UINTN
)Pages
;
270 for (Index
= 0; Index
< 4; Index
++) {
271 PTEntry
[Index
] |= IA32_PG_PMNT
;
275 // Fill Page-Table-Level4 (PML4) entry
277 PTEntry
= (UINT64
*)AllocatePageTableMemory (1);
278 ASSERT (PTEntry
!= NULL
);
279 *PTEntry
= Pages
| PAGE_ATTRIBUTE_BITS
;
280 ZeroMem (PTEntry
+ 1, EFI_PAGE_SIZE
- sizeof (*PTEntry
));
283 // Set sub-entries number
285 SetSubEntriesNum (PTEntry
, 3);
// Static mode: map the whole address space now. Otherwise (else branch,
// header line missing here) pre-fill the on-demand page pool.
287 if (mCpuSmmStaticPageTable
) {
288 SetStaticPageTable ((UINTN
)PTEntry
);
291 // Add pages to page pool
293 FreePage
= (LIST_ENTRY
*)AllocatePageTableMemory (PAGE_TABLE_PAGES
);
294 ASSERT (FreePage
!= NULL
);
295 for (Index
= 0; Index
< PAGE_TABLE_PAGES
; Index
++) {
296 InsertTailList (&mPagePool
, FreePage
);
297 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
301 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
303 // Set own Page Fault entry instead of the default one, because SMM Profile
304 // feature depends on IRET instruction to do Single Step
306 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
307 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*) gcSmiIdtr
.Base
;
308 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
// Split the 64-bit handler address across the x64 IDT gate fields.
309 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
310 IdtEntry
->Bits
.Reserved_0
= 0;
311 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
312 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
313 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
314 IdtEntry
->Bits
.Reserved_1
= 0;
317 // Register Smm Page Fault Handler
319 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
320 ASSERT_EFI_ERROR (Status
);
324 // Additional SMM IDT initialization for SMM stack guard
326 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
327 InitializeIDTSmmStackGuard ();
331 // Return the address of PML4 (to set CR3)
333 return (UINT32
)(UINTN
)PTEntry
;
// Stores a 3-bit access record in an entry's ignored bits 9..11; used by
// the LRU-style page reclaim logic.
337 Set access record in entry.
339 @param[in, out] Entry Pointer to entry
340 @param[in] Acc Access record value
345 IN OUT UINT64
*Entry
,
350 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
352 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
// Reads back the 3-bit access record written by SetAccNum.
356 Return access record in entry.
358 @param[in] Entry Pointer to entry
360 @return Access record value.
369 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
371 return BitFieldRead64 (*Entry
, 9, 11);
// Ages an entry's access record: a recently-accessed entry (A flag set) is
// reset to the maximum record (7) and reported as 7 + ACC_MAX_BIT so it
// ranks above never-reset entries; otherwise the record decays by one.
375 Return and update the access record in entry.
377 @param[in, out] Entry Pointer to entry
379 @return Access record value.
389 Acc
= GetAccNum (Entry
);
390 if ((*Entry
& IA32_PG_A
) != 0) {
392 // If this entry has been accessed, clear access flag in Entry and update access record
393 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
395 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
396 SetAccNum (Entry
, 0x7);
397 return (0x7 + ACC_MAX_BIT
);
// NOTE(review): the branch that returns Acc when it is already 0 appears
// to be missing from this extraction.
401 // If the access record is not the smallest value 0, minus 1 and update the access record field
403 SetAccNum (Entry
, Acc
- 1);
// Walks the live page-table tree (PML4 -> PDPT -> PD), using the access
// records to find the least-recently-used leaf table, releases its page
// back to mPagePool, and then releases any parent tables left empty.
// Entries marked IA32_PG_PMNT are never reclaimed.
// NOTE(review): the Min* bookkeeping assignments and several control-flow
// lines are missing from this extraction.
410 Reclaim free pages for PageFault handler.
412 Search the whole entries tree to find the leaf entry that has the smallest
413 access record value. Insert the page pointed by this leaf entry into the
414 page pool. And check its upper entries if need to be inserted into the page
434 UINT64 SubEntriesNum
;
437 UINT64
*ReleasePageAddress
;
447 ReleasePageAddress
= 0;
450 // First, find the leaf entry has the smallest access record value
// CR3 points at the PML4; mask off attribute bits to get its address.
452 Pml4
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
453 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
454 if ((Pml4
[Pml4Index
] & IA32_PG_P
) == 0 || (Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0) {
456 // If the PML4 entry is not present or is masked, skip it
460 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & gPhyMask
);
462 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
463 if ((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0 || (Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
465 // If the PDPT entry is not present or is masked, skip it
467 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
469 // If the PDPT entry is masked, we will ignore checking the PML4 entry
475 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
477 // It's not 1-GByte pages entry, it should be a PDPT entry,
478 // we will not check PML4 entry more
481 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & gPhyMask
);
483 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof(*Pdt
); PdtIndex
++) {
484 if ((Pdt
[PdtIndex
] & IA32_PG_P
) == 0 || (Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
486 // If the PD entry is not present or is masked, skip it
488 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
490 // If the PD entry is masked, we will not check the PDPT entry any more
496 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
498 // It's not 2 MByte page table entry, it should be PD entry
499 // we will find the entry has the smallest access record value
502 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
505 // If the PD entry has the smallest access record value,
506 // save the Page address to be released
512 ReleasePageAddress
= Pdt
+ PdtIndex
;
518 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
519 // it should only has the entries point to 2 MByte Pages
521 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
524 // If the PDPT entry has the smallest access record value,
525 // save the Page address to be released
531 ReleasePageAddress
= Pdpt
+ PdptIndex
;
538 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
539 // it should only has the entries point to 1 GByte Pages
541 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
544 // If the PML4 entry has the smallest access record value,
545 // save the Page address to be released
551 ReleasePageAddress
= Pml4
+ Pml4Index
;
556 // Make sure one PML4/PDPT/PD entry is selected
558 ASSERT (MinAcc
!= (UINT64
)-1);
561 // Secondly, insert the page pointed by this entry into page pool and clear this entry
563 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& gPhyMask
));
564 *ReleasePageAddress
= 0;
567 // Lastly, check this entry's upper entries if need to be inserted into page pool
571 if (MinPdt
!= (UINTN
)-1) {
573 // If 4 KByte Page Table is released, check the PDPT entry
575 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
);
576 SubEntriesNum
= GetSubEntriesNum(Pdpt
+ MinPdpt
);
577 if (SubEntriesNum
== 0) {
579 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
580 // clear the Page directory entry
582 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & gPhyMask
));
585 // Go on checking the PML4 table
591 // Update the sub-entries field in PDPT entry and exit
593 SetSubEntriesNum (Pdpt
+ MinPdpt
, SubEntriesNum
- 1);
596 if (MinPdpt
!= (UINTN
)-1) {
598 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
600 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
601 if (SubEntriesNum
== 0) {
603 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
604 // clear the Page directory entry
606 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
));
612 // Update the sub-entries field in PML4 entry and exit
614 SetSubEntriesNum (Pml4
+ MinPml4
, SubEntriesNum
- 1);
618 // PLM4 table has been released before, exit it
// Pops one zeroed 4-KByte page off mPagePool for the page-fault handler,
// reclaiming pages first if the pool is empty.
625 Allocate free Page for PageFault handler use.
627 @return Page address.
637 if (IsListEmpty (&mPagePool
)) {
639 // If page pool is empty, reclaim the used pages and insert one into page pool
645 // Get one free page and remove it from page pool
647 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
648 RemoveEntryList (mPagePool
.ForwardLink
);
650 // Clean this page and return
// The list node sits at the start of the page, so the node address IS the
// page address; zero the whole page before handing it out.
652 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
// Default SMM #PF handler body: maps the faulting address on demand by
// walking from the PML4 down to the level implied by PageSize, allocating
// intermediate tables from the page pool as needed.
// NOTE(review): variable declarations, the switch over PageSize that sets
// StartBit/EndBit, and loop-closing braces are missing from this extraction.
657 Page Fault handler for SMM use.
661 SmiDefaultPFHandler (
672 SMM_PAGE_SIZE_TYPE PageSize
;
679 // Set default SMM page attribute
681 PageSize
= SmmPageSize2M
;
// CR3 holds the PML4 base; CR2 holds the faulting linear address.
686 Pml4
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
687 PFAddress
= AsmReadCr2 ();
// Let the platform override page size / count / attributes for this fault.
689 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
691 // If platform not support page table attribute, set default SMM page attribute
693 if (Status
!= EFI_SUCCESS
) {
694 PageSize
= SmmPageSize2M
;
// Sanitize platform-supplied values to safe defaults.
698 if (PageSize
>= MaxSmmPageSizeType
) {
699 PageSize
= SmmPageSize2M
;
701 if (NumOfPages
> 512) {
708 // BIT12 to BIT20 is Page Table index
714 // BIT21 to BIT29 is Page Directory index
717 PageAttribute
|= (UINTN
)IA32_PG_PS
;
720 if (!m1GPageTableSupport
) {
721 DEBUG ((DEBUG_ERROR
, "1-GByte pages is not supported!"));
725 // BIT30 to BIT38 is Page Directory Pointer Table index
728 PageAttribute
|= (UINTN
)IA32_PG_PS
;
735 // If execute-disable is enabled, set NX bit
738 PageAttribute
|= IA32_PG_NX
;
741 for (Index
= 0; Index
< NumOfPages
; Index
++) {
// Walk down one paging level per iteration (9 index bits per level),
// starting at the PML4 index (linear address bits 47:39).
744 for (StartBit
= 39; StartBit
> EndBit
; StartBit
-= 9) {
745 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
746 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
748 // If the entry is not present, allocate one page from page pool for it
750 PageTable
[PTIndex
] = AllocPage () | PAGE_ATTRIBUTE_BITS
;
753 // Save the upper entry address
755 UpperEntry
= PageTable
+ PTIndex
;
758 // BIT9 to BIT11 of entry is used to save access record,
759 // initialize value is 7
761 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
762 SetAccNum (PageTable
+ PTIndex
, 7);
763 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
// At the target level: compute the leaf index and fill the leaf entry.
766 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
767 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
769 // Check if the entry has already existed, this issue may occur when the different
770 // size page entries created under the same entry
772 DEBUG ((DEBUG_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
773 DEBUG ((DEBUG_ERROR
, "New page table overlapped with old page table!\n"));
777 // Fill the new entry
779 PageTable
[PTIndex
] = (PFAddress
& gPhyMask
& ~((1ull << EndBit
) - 1)) |
780 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
781 if (UpperEntry
!= NULL
) {
782 SetSubEntriesNum (UpperEntry
, GetSubEntriesNum (UpperEntry
) + 1);
785 // Get the next page address if we need to create more page tables
787 PFAddress
+= (1ull << EndBit
);
// Registered SMM #PF handler: diagnoses faults inside SMRAM (stack guard
// overflow or SMM page-protection violations) and faults outside SMRAM
// (illegal execution/access after SMM lock), then delegates to the SMM
// Profile handler or SmiDefaultPFHandler. Serialized by mPFLock.
// NOTE(review): CpuDeadLoop/goto-exit lines and several closing braces are
// missing from this extraction.
792 ThePage Fault handler wrapper for SMM use.
794 @param InterruptType Defines the type of interrupt or exception that
795 occurred on the processor.This parameter is processor architecture specific.
796 @param SystemContext A pointer to the processor context when
797 the interrupt occurred on the processor.
802 IN EFI_EXCEPTION_TYPE InterruptType
,
803 IN EFI_SYSTEM_CONTEXT SystemContext
807 UINTN GuardPageAddress
;
810 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
812 AcquireSpinLock (mPFLock
);
814 PFAddress
= AsmReadCr2 ();
// With a static page table there is no on-demand mapping; a fault above
// the supported address range is a hard error.
816 if (mCpuSmmStaticPageTable
&& (PFAddress
>= LShiftU64 (1, (mPhysicalAddressBits
- 1)))) {
817 DEBUG ((DEBUG_ERROR
, "Do not support address 0x%lx by processor!\n", PFAddress
));
822 // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
823 // or SMM page protection violation.
825 if ((PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
826 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))) {
827 CpuIndex
= GetCpuIndex ();
// Guard page sits one page above this CPU's stack base.
828 GuardPageAddress
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
+ CpuIndex
* mSmmStackSize
);
829 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
830 (PFAddress
>= GuardPageAddress
) &&
831 (PFAddress
< (GuardPageAddress
+ EFI_PAGE_SIZE
))) {
832 DEBUG ((DEBUG_ERROR
, "SMM stack overflow!\n"));
// Otherwise decode the #PF error code bits for diagnostics:
// I=instruction fetch, R=reserved-bit, U=user, W=write, P=present.
834 DEBUG ((DEBUG_ERROR
, "SMM exception data - 0x%lx(", SystemContext
.SystemContextX64
->ExceptionData
));
835 DEBUG ((DEBUG_ERROR
, "I:%x, R:%x, U:%x, W:%x, P:%x",
836 (SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0,
837 (SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_RSVD
) != 0,
838 (SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_US
) != 0,
839 (SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_WR
) != 0,
840 (SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_P
) != 0
842 DEBUG ((DEBUG_ERROR
, ")\n"));
843 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
844 DEBUG ((DEBUG_ERROR
, "SMM exception at execution (0x%lx)\n", PFAddress
));
// On an instruction fetch fault the return address is on the stack.
846 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
849 DEBUG ((DEBUG_ERROR
, "SMM exception at access (0x%lx)\n", PFAddress
));
851 DumpModuleInfoByIp ((UINTN
)SystemContext
.SystemContextX64
->Rip
);
859 // If a page fault occurs in SMM range
861 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
862 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
863 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
864 DEBUG ((DEBUG_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
866 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
// Delegate: SMM Profile logs the fault, otherwise map on demand.
872 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
873 SmmProfilePFHandler (
874 SystemContext
.SystemContextX64
->Rip
,
875 SystemContext
.SystemContextX64
->ExceptionData
878 SmiDefaultPFHandler ();
881 ReleaseSpinLock (mPFLock
);
// Marks every level of the (static) page table itself read-only via
// SmmSetMemoryAttributesEx. Setting RO can split large pages, which creates
// new page-table pages, so the outer do/while repeats until no split occurs.
// Only meaningful with a static page table (early return otherwise).
// NOTE(review): the do { opener, continue statements, and closing braces
// are missing from this extraction.
885 This function sets memory attribute for page table.
888 SetPageTableAttributes (
900 BOOLEAN PageTableSplitted
;
// On-demand paging mutates the table at runtime; skip hardening then.
902 if (!mCpuSmmStaticPageTable
) {
906 DEBUG ((DEBUG_INFO
, "SetPageTableAttributes\n"));
909 // Disable write protection, because we need mark page table to be write protected.
910 // We need *write* page table memory, to mark itself to be *read only*.
912 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP
);
915 DEBUG ((DEBUG_INFO
, "Start...\n"));
916 PageTableSplitted
= FALSE
;
918 L4PageTable
= (UINT64
*)GetPageTableBase ();
919 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L4PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
920 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
// Walk L4 -> L3 -> L2 -> L1, protecting each table page; large-page
// entries (IA32_PG_PS) and empty entries have no lower table to protect.
922 for (Index4
= 0; Index4
< SIZE_4KB
/sizeof(UINT64
); Index4
++) {
923 L3PageTable
= (UINT64
*)(UINTN
)(L4PageTable
[Index4
] & PAGING_4K_ADDRESS_MASK_64
);
924 if (L3PageTable
== NULL
) {
928 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L3PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
929 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
931 for (Index3
= 0; Index3
< SIZE_4KB
/sizeof(UINT64
); Index3
++) {
932 if ((L3PageTable
[Index3
] & IA32_PG_PS
) != 0) {
936 L2PageTable
= (UINT64
*)(UINTN
)(L3PageTable
[Index3
] & PAGING_4K_ADDRESS_MASK_64
);
937 if (L2PageTable
== NULL
) {
941 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L2PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
942 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
944 for (Index2
= 0; Index2
< SIZE_4KB
/sizeof(UINT64
); Index2
++) {
945 if ((L2PageTable
[Index2
] & IA32_PG_PS
) != 0) {
949 L1PageTable
= (UINT64
*)(UINTN
)(L2PageTable
[Index2
] & PAGING_4K_ADDRESS_MASK_64
);
950 if (L1PageTable
== NULL
) {
953 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L1PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
954 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
958 } while (PageTableSplitted
);
961 // Enable write protection, after page table updated.
963 AsmWriteCr0 (AsmReadCr0() | CR0_WP
);