/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
17 #define PAGE_TABLE_PAGES 8
18 #define ACC_MAX_BIT BIT3
19 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
20 BOOLEAN m1GPageTableSupport
= FALSE
;
21 UINT8 mPhysicalAddressBits
;
22 BOOLEAN mCpuSmmStaticPageTable
;
25 Check if 1-GByte pages is supported by processor or not.
27 @retval TRUE 1-GByte pages is supported.
28 @retval FALSE 1-GByte pages is not supported.
39 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
40 if (RegEax
>= 0x80000001) {
41 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
42 if ((RegEdx
& BIT26
) != 0) {
50 Set sub-entries number in entry.
52 @param[in, out] Entry Pointer to entry
53 @param[in] SubEntryNum Sub-entries number based on 0:
54 0 means there is 1 sub-entry under this entry
55 0x1ff means there is 512 sub-entries under this entry
65 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
67 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
71 Return sub-entries number in entry.
73 @param[in] Entry Pointer to entry
75 @return Sub-entries number based on 0:
76 0 means there is 1 sub-entry under this entry
77 0x1ff means there is 512 sub-entries under this entry
85 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
87 return BitFieldRead64 (*Entry
, 52, 60);
91 Calculate the maximum support address.
93 @return the maximum support address.
96 CalculateMaximumSupportAddress (
101 UINT8 PhysicalAddressBits
;
105 // Get physical address bits supported.
107 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
109 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
111 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
112 if (RegEax
>= 0x80000008) {
113 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
114 PhysicalAddressBits
= (UINT8
) RegEax
;
116 PhysicalAddressBits
= 36;
121 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
123 ASSERT (PhysicalAddressBits
<= 52);
124 if (PhysicalAddressBits
> 48) {
125 PhysicalAddressBits
= 48;
127 return PhysicalAddressBits
;
131 Set static page table.
133 @param[in] PageTable Address of page table.
141 UINTN NumberOfPml4EntriesNeeded
;
142 UINTN NumberOfPdpEntriesNeeded
;
143 UINTN IndexOfPml4Entries
;
144 UINTN IndexOfPdpEntries
;
145 UINTN IndexOfPageDirectoryEntries
;
146 UINT64
*PageMapLevel4Entry
;
148 UINT64
*PageDirectoryPointerEntry
;
149 UINT64
*PageDirectory1GEntry
;
150 UINT64
*PageDirectoryEntry
;
152 if (mPhysicalAddressBits
<= 39 ) {
153 NumberOfPml4EntriesNeeded
= 1;
154 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (mPhysicalAddressBits
- 30));
156 NumberOfPml4EntriesNeeded
= (UINT32
)LShiftU64 (1, (mPhysicalAddressBits
- 39));
157 NumberOfPdpEntriesNeeded
= 512;
161 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
163 PageMap
= (VOID
*) PageTable
;
165 PageMapLevel4Entry
= PageMap
;
167 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< NumberOfPml4EntriesNeeded
; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
169 // Each PML4 entry points to a page of Page Directory Pointer entries.
171 PageDirectoryPointerEntry
= (UINT64
*) ((*PageMapLevel4Entry
) & gPhyMask
);
172 if (PageDirectoryPointerEntry
== NULL
) {
173 PageDirectoryPointerEntry
= AllocatePageTableMemory (1);
174 ASSERT(PageDirectoryPointerEntry
!= NULL
);
175 ZeroMem (PageDirectoryPointerEntry
, EFI_PAGES_TO_SIZE(1));
177 *PageMapLevel4Entry
= ((UINTN
)PageDirectoryPointerEntry
& gPhyMask
) | PAGE_ATTRIBUTE_BITS
;
180 if (m1GPageTableSupport
) {
181 PageDirectory1GEntry
= PageDirectoryPointerEntry
;
182 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
183 if (IndexOfPml4Entries
== 0 && IndexOfPageDirectoryEntries
< 4) {
185 // Skip the < 4G entries
190 // Fill in the Page Directory entries
192 *PageDirectory1GEntry
= (PageAddress
& gPhyMask
) | IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
195 PageAddress
= BASE_4GB
;
196 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
197 if (IndexOfPml4Entries
== 0 && IndexOfPdpEntries
< 4) {
199 // Skip the < 4G entries
204 // Each Directory Pointer entries points to a page of Page Directory entires.
205 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
207 PageDirectoryEntry
= (UINT64
*) ((*PageDirectoryPointerEntry
) & gPhyMask
);
208 if (PageDirectoryEntry
== NULL
) {
209 PageDirectoryEntry
= AllocatePageTableMemory (1);
210 ASSERT(PageDirectoryEntry
!= NULL
);
211 ZeroMem (PageDirectoryEntry
, EFI_PAGES_TO_SIZE(1));
214 // Fill in a Page Directory Pointer Entries
216 *PageDirectoryPointerEntry
= (UINT64
)(UINTN
)PageDirectoryEntry
| PAGE_ATTRIBUTE_BITS
;
219 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
221 // Fill in the Page Directory entries
223 *PageDirectoryEntry
= (UINT64
)PageAddress
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
231 Create PageTable for SMM use.
233 @return The address of PML4 (to set CR3).
241 EFI_PHYSICAL_ADDRESS Pages
;
243 LIST_ENTRY
*FreePage
;
245 UINTN PageFaultHandlerHookAddress
;
246 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
249 // Initialize spin lock
251 InitializeSpinLock (mPFLock
);
253 mCpuSmmStaticPageTable
= PcdGetBool (PcdCpuSmmStaticPageTable
);
254 m1GPageTableSupport
= Is1GPageSupport ();
255 DEBUG ((DEBUG_INFO
, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport
));
256 DEBUG ((DEBUG_INFO
, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable
));
258 mPhysicalAddressBits
= CalculateMaximumSupportAddress ();
259 DEBUG ((DEBUG_INFO
, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits
));
261 // Generate PAE page table for the first 4GB memory space
263 Pages
= Gen4GPageTable (FALSE
);
266 // Set IA32_PG_PMNT bit to mask this entry
268 PTEntry
= (UINT64
*)(UINTN
)Pages
;
269 for (Index
= 0; Index
< 4; Index
++) {
270 PTEntry
[Index
] |= IA32_PG_PMNT
;
274 // Fill Page-Table-Level4 (PML4) entry
276 PTEntry
= (UINT64
*)AllocatePageTableMemory (1);
277 ASSERT (PTEntry
!= NULL
);
278 *PTEntry
= Pages
| PAGE_ATTRIBUTE_BITS
;
279 ZeroMem (PTEntry
+ 1, EFI_PAGE_SIZE
- sizeof (*PTEntry
));
282 // Set sub-entries number
284 SetSubEntriesNum (PTEntry
, 3);
286 if (mCpuSmmStaticPageTable
) {
287 SetStaticPageTable ((UINTN
)PTEntry
);
290 // Add pages to page pool
292 FreePage
= (LIST_ENTRY
*)AllocatePageTableMemory (PAGE_TABLE_PAGES
);
293 ASSERT (FreePage
!= NULL
);
294 for (Index
= 0; Index
< PAGE_TABLE_PAGES
; Index
++) {
295 InsertTailList (&mPagePool
, FreePage
);
296 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
300 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
302 // Set own Page Fault entry instead of the default one, because SMM Profile
303 // feature depends on IRET instruction to do Single Step
305 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
306 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*) gcSmiIdtr
.Base
;
307 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
308 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
309 IdtEntry
->Bits
.Reserved_0
= 0;
310 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
311 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
312 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
313 IdtEntry
->Bits
.Reserved_1
= 0;
316 // Register Smm Page Fault Handler
318 SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
322 // Additional SMM IDT initialization for SMM stack guard
324 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
325 InitializeIDTSmmStackGuard ();
329 // Return the address of PML4 (to set CR3)
331 return (UINT32
)(UINTN
)PTEntry
;
335 Set access record in entry.
337 @param[in, out] Entry Pointer to entry
338 @param[in] Acc Access record value
343 IN OUT UINT64
*Entry
,
348 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
350 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
354 Return access record in entry.
356 @param[in] Entry Pointer to entry
358 @return Access record value.
367 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
369 return BitFieldRead64 (*Entry
, 9, 11);
373 Return and update the access record in entry.
375 @param[in, out] Entry Pointer to entry
377 @return Access record value.
387 Acc
= GetAccNum (Entry
);
388 if ((*Entry
& IA32_PG_A
) != 0) {
390 // If this entry has been accessed, clear access flag in Entry and update access record
391 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
393 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
394 SetAccNum (Entry
, 0x7);
395 return (0x7 + ACC_MAX_BIT
);
399 // If the access record is not the smallest value 0, minus 1 and update the access record field
401 SetAccNum (Entry
, Acc
- 1);
408 Reclaim free pages for PageFault handler.
410 Search the whole entries tree to find the leaf entry that has the smallest
411 access record value. Insert the page pointed by this leaf entry into the
412 page pool. And check its upper entries if need to be inserted into the page
432 UINT64 SubEntriesNum
;
435 UINT64
*ReleasePageAddress
;
445 ReleasePageAddress
= 0;
448 // First, find the leaf entry has the smallest access record value
450 Pml4
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
451 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
452 if ((Pml4
[Pml4Index
] & IA32_PG_P
) == 0 || (Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0) {
454 // If the PML4 entry is not present or is masked, skip it
458 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & gPhyMask
);
460 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
461 if ((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0 || (Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
463 // If the PDPT entry is not present or is masked, skip it
465 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
467 // If the PDPT entry is masked, we will ignore checking the PML4 entry
473 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
475 // It's not 1-GByte pages entry, it should be a PDPT entry,
476 // we will not check PML4 entry more
479 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & gPhyMask
);
481 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof(*Pdt
); PdtIndex
++) {
482 if ((Pdt
[PdtIndex
] & IA32_PG_P
) == 0 || (Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
484 // If the PD entry is not present or is masked, skip it
486 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
488 // If the PD entry is masked, we will not PDPT entry more
494 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
496 // It's not 2 MByte page table entry, it should be PD entry
497 // we will find the entry has the smallest access record value
500 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
503 // If the PD entry has the smallest access record value,
504 // save the Page address to be released
510 ReleasePageAddress
= Pdt
+ PdtIndex
;
516 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
517 // it should only has the entries point to 2 MByte Pages
519 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
522 // If the PDPT entry has the smallest access record value,
523 // save the Page address to be released
529 ReleasePageAddress
= Pdpt
+ PdptIndex
;
536 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
537 // it should only has the entries point to 1 GByte Pages
539 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
542 // If the PML4 entry has the smallest access record value,
543 // save the Page address to be released
549 ReleasePageAddress
= Pml4
+ Pml4Index
;
554 // Make sure one PML4/PDPT/PD entry is selected
556 ASSERT (MinAcc
!= (UINT64
)-1);
559 // Secondly, insert the page pointed by this entry into page pool and clear this entry
561 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& gPhyMask
));
562 *ReleasePageAddress
= 0;
565 // Lastly, check this entry's upper entries if need to be inserted into page pool
569 if (MinPdt
!= (UINTN
)-1) {
571 // If 4 KByte Page Table is released, check the PDPT entry
573 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
);
574 SubEntriesNum
= GetSubEntriesNum(Pdpt
+ MinPdpt
);
575 if (SubEntriesNum
== 0) {
577 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
578 // clear the Page directory entry
580 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & gPhyMask
));
583 // Go on checking the PML4 table
589 // Update the sub-entries filed in PDPT entry and exit
591 SetSubEntriesNum (Pdpt
+ MinPdpt
, SubEntriesNum
- 1);
594 if (MinPdpt
!= (UINTN
)-1) {
596 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
598 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
599 if (SubEntriesNum
== 0) {
601 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
602 // clear the Page directory entry
604 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
));
610 // Update the sub-entries filed in PML4 entry and exit
612 SetSubEntriesNum (Pml4
+ MinPml4
, SubEntriesNum
- 1);
616 // PLM4 table has been released before, exit it
623 Allocate free Page for PageFault handler use.
625 @return Page address.
635 if (IsListEmpty (&mPagePool
)) {
637 // If page pool is empty, reclaim the used pages and insert one into page pool
643 // Get one free page and remove it from page pool
645 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
646 RemoveEntryList (mPagePool
.ForwardLink
);
648 // Clean this page and return
650 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
655 Page Fault handler for SMM use.
659 SmiDefaultPFHandler (
670 SMM_PAGE_SIZE_TYPE PageSize
;
677 // Set default SMM page attribute
679 PageSize
= SmmPageSize2M
;
684 Pml4
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
685 PFAddress
= AsmReadCr2 ();
687 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
689 // If platform not support page table attribute, set default SMM page attribute
691 if (Status
!= EFI_SUCCESS
) {
692 PageSize
= SmmPageSize2M
;
696 if (PageSize
>= MaxSmmPageSizeType
) {
697 PageSize
= SmmPageSize2M
;
699 if (NumOfPages
> 512) {
706 // BIT12 to BIT20 is Page Table index
712 // BIT21 to BIT29 is Page Directory index
715 PageAttribute
|= (UINTN
)IA32_PG_PS
;
718 if (!m1GPageTableSupport
) {
719 DEBUG ((DEBUG_ERROR
, "1-GByte pages is not supported!"));
723 // BIT30 to BIT38 is Page Directory Pointer Table index
726 PageAttribute
|= (UINTN
)IA32_PG_PS
;
733 // If execute-disable is enabled, set NX bit
736 PageAttribute
|= IA32_PG_NX
;
739 for (Index
= 0; Index
< NumOfPages
; Index
++) {
742 for (StartBit
= 39; StartBit
> EndBit
; StartBit
-= 9) {
743 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
744 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
746 // If the entry is not present, allocate one page from page pool for it
748 PageTable
[PTIndex
] = AllocPage () | PAGE_ATTRIBUTE_BITS
;
751 // Save the upper entry address
753 UpperEntry
= PageTable
+ PTIndex
;
756 // BIT9 to BIT11 of entry is used to save access record,
757 // initialize value is 7
759 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
760 SetAccNum (PageTable
+ PTIndex
, 7);
761 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
764 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
765 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
767 // Check if the entry has already existed, this issue may occur when the different
768 // size page entries created under the same entry
770 DEBUG ((DEBUG_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
771 DEBUG ((DEBUG_ERROR
, "New page table overlapped with old page table!\n"));
775 // Fill the new entry
777 PageTable
[PTIndex
] = (PFAddress
& gPhyMask
& ~((1ull << EndBit
) - 1)) |
778 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
779 if (UpperEntry
!= NULL
) {
780 SetSubEntriesNum (UpperEntry
, GetSubEntriesNum (UpperEntry
) + 1);
783 // Get the next page address if we need to create more page tables
785 PFAddress
+= (1ull << EndBit
);
790 ThePage Fault handler wrapper for SMM use.
792 @param InterruptType Defines the type of interrupt or exception that
793 occurred on the processor.This parameter is processor architecture specific.
794 @param SystemContext A pointer to the processor context when
795 the interrupt occurred on the processor.
800 IN EFI_EXCEPTION_TYPE InterruptType
,
801 IN EFI_SYSTEM_CONTEXT SystemContext
806 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
808 AcquireSpinLock (mPFLock
);
810 PFAddress
= AsmReadCr2 ();
812 if (mCpuSmmStaticPageTable
&& (PFAddress
>= LShiftU64 (1, (mPhysicalAddressBits
- 1)))) {
813 DEBUG ((DEBUG_ERROR
, "Do not support address 0x%lx by processor!\n", PFAddress
));
818 // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
820 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
821 (PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
822 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))) {
823 DEBUG ((DEBUG_ERROR
, "SMM stack overflow!\n"));
828 // If a page fault occurs in SMM range
830 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
831 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
832 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
833 DEBUG ((DEBUG_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
835 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
841 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
842 SmmProfilePFHandler (
843 SystemContext
.SystemContextX64
->Rip
,
844 SystemContext
.SystemContextX64
->ExceptionData
847 SmiDefaultPFHandler ();
850 ReleaseSpinLock (mPFLock
);
854 This function sets memory attribute for page table.
857 SetPageTableAttributes (
869 BOOLEAN PageTableSplitted
;
871 if (!mCpuSmmStaticPageTable
) {
875 DEBUG ((DEBUG_INFO
, "SetPageTableAttributes\n"));
878 // Disable write protection, because we need mark page table to be write protected.
879 // We need *write* page table memory, to mark itself to be *read only*.
881 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP
);
884 DEBUG ((DEBUG_INFO
, "Start...\n"));
885 PageTableSplitted
= FALSE
;
887 L4PageTable
= (UINT64
*)GetPageTableBase ();
888 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L4PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
889 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
891 for (Index4
= 0; Index4
< SIZE_4KB
/sizeof(UINT64
); Index4
++) {
892 L3PageTable
= (UINT64
*)(UINTN
)(L4PageTable
[Index4
] & PAGING_4K_ADDRESS_MASK_64
);
893 if (L3PageTable
== NULL
) {
897 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L3PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
898 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
900 for (Index3
= 0; Index3
< SIZE_4KB
/sizeof(UINT64
); Index3
++) {
901 if ((L3PageTable
[Index3
] & IA32_PG_PS
) != 0) {
905 L2PageTable
= (UINT64
*)(UINTN
)(L3PageTable
[Index3
] & PAGING_4K_ADDRESS_MASK_64
);
906 if (L2PageTable
== NULL
) {
910 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L2PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
911 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
913 for (Index2
= 0; Index2
< SIZE_4KB
/sizeof(UINT64
); Index2
++) {
914 if ((L2PageTable
[Index2
] & IA32_PG_PS
) != 0) {
918 L1PageTable
= (UINT64
*)(UINTN
)(L2PageTable
[Index2
] & PAGING_4K_ADDRESS_MASK_64
);
919 if (L1PageTable
== NULL
) {
922 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS
)(UINTN
)L1PageTable
, SIZE_4KB
, EFI_MEMORY_RO
, &IsSplitted
);
923 PageTableSplitted
= (PageTableSplitted
|| IsSplitted
);
927 } while (PageTableSplitted
);
930 // Enable write protection, after page table updated.
932 AsmWriteCr0 (AsmReadCr0() | CR0_WP
);