/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
17 #define PAGE_TABLE_PAGES 8
18 #define ACC_MAX_BIT BIT3
19 LIST_ENTRY mPagePool
= INITIALIZE_LIST_HEAD_VARIABLE (mPagePool
);
20 BOOLEAN m1GPageTableSupport
= FALSE
;
23 Check if 1-GByte pages is supported by processor or not.
25 @retval TRUE 1-GByte pages is supported.
26 @retval FALSE 1-GByte pages is not supported.
37 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
38 if (RegEax
>= 0x80000001) {
39 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
40 if ((RegEdx
& BIT26
) != 0) {
48 Set sub-entries number in entry.
50 @param[in, out] Entry Pointer to entry
51 @param[in] SubEntryNum Sub-entries number based on 0:
52 0 means there is 1 sub-entry under this entry
53 0x1ff means there is 512 sub-entries under this entry
63 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
65 *Entry
= BitFieldWrite64 (*Entry
, 52, 60, SubEntryNum
);
69 Return sub-entries number in entry.
71 @param[in] Entry Pointer to entry
73 @return Sub-entries number based on 0:
74 0 means there is 1 sub-entry under this entry
75 0x1ff means there is 512 sub-entries under this entry
83 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
85 return BitFieldRead64 (*Entry
, 52, 60);
89 Create PageTable for SMM use.
91 @return The address of PML4 (to set CR3).
99 EFI_PHYSICAL_ADDRESS Pages
;
101 LIST_ENTRY
*FreePage
;
103 UINTN PageFaultHandlerHookAddress
;
104 IA32_IDT_GATE_DESCRIPTOR
*IdtEntry
;
107 // Initialize spin lock
109 InitializeSpinLock (mPFLock
);
111 m1GPageTableSupport
= Is1GPageSupport ();
113 // Generate PAE page table for the first 4GB memory space
115 Pages
= Gen4GPageTable (PAGE_TABLE_PAGES
+ 1, FALSE
);
118 // Set IA32_PG_PMNT bit to mask this entry
120 PTEntry
= (UINT64
*)(UINTN
)Pages
;
121 for (Index
= 0; Index
< 4; Index
++) {
122 PTEntry
[Index
] |= IA32_PG_PMNT
;
126 // Fill Page-Table-Level4 (PML4) entry
128 PTEntry
= (UINT64
*)(UINTN
)(Pages
- EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES
+ 1));
129 *PTEntry
= Pages
+ PAGE_ATTRIBUTE_BITS
;
130 ZeroMem (PTEntry
+ 1, EFI_PAGE_SIZE
- sizeof (*PTEntry
));
132 // Set sub-entries number
134 SetSubEntriesNum (PTEntry
, 3);
137 // Add remaining pages to page pool
139 FreePage
= (LIST_ENTRY
*)(PTEntry
+ EFI_PAGE_SIZE
/ sizeof (*PTEntry
));
140 while ((UINTN
)FreePage
< Pages
) {
141 InsertTailList (&mPagePool
, FreePage
);
142 FreePage
+= EFI_PAGE_SIZE
/ sizeof (*FreePage
);
145 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
147 // Set own Page Fault entry instead of the default one, because SMM Profile
148 // feature depends on IRET instruction to do Single Step
150 PageFaultHandlerHookAddress
= (UINTN
)PageFaultIdtHandlerSmmProfile
;
151 IdtEntry
= (IA32_IDT_GATE_DESCRIPTOR
*) gcSmiIdtr
.Base
;
152 IdtEntry
+= EXCEPT_IA32_PAGE_FAULT
;
153 IdtEntry
->Bits
.OffsetLow
= (UINT16
)PageFaultHandlerHookAddress
;
154 IdtEntry
->Bits
.Reserved_0
= 0;
155 IdtEntry
->Bits
.GateType
= IA32_IDT_GATE_TYPE_INTERRUPT_32
;
156 IdtEntry
->Bits
.OffsetHigh
= (UINT16
)(PageFaultHandlerHookAddress
>> 16);
157 IdtEntry
->Bits
.OffsetUpper
= (UINT32
)(PageFaultHandlerHookAddress
>> 32);
158 IdtEntry
->Bits
.Reserved_1
= 0;
161 // Register Smm Page Fault Handler
163 SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_PAGE_FAULT
, SmiPFHandler
);
167 // Additional SMM IDT initialization for SMM stack guard
169 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
170 InitializeIDTSmmStackGuard ();
174 // Return the address of PML4 (to set CR3)
176 return (UINT32
)(UINTN
)PTEntry
;
180 Set access record in entry.
182 @param[in, out] Entry Pointer to entry
183 @param[in] Acc Access record value
188 IN OUT UINT64
*Entry
,
193 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
195 *Entry
= BitFieldWrite64 (*Entry
, 9, 11, Acc
);
199 Return access record in entry.
201 @param[in] Entry Pointer to entry
203 @return Access record value.
212 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
214 return BitFieldRead64 (*Entry
, 9, 11);
218 Return and update the access record in entry.
220 @param[in, out] Entry Pointer to entry
222 @return Access record value.
232 Acc
= GetAccNum (Entry
);
233 if ((*Entry
& IA32_PG_A
) != 0) {
235 // If this entry has been accessed, clear access flag in Entry and update access record
236 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
238 *Entry
&= ~(UINT64
)(UINTN
)IA32_PG_A
;
239 SetAccNum (Entry
, 0x7);
240 return (0x7 + ACC_MAX_BIT
);
244 // If the access record is not the smallest value 0, minus 1 and update the access record field
246 SetAccNum (Entry
, Acc
- 1);
253 Reclaim free pages for PageFault handler.
255 Search the whole entries tree to find the leaf entry that has the smallest
256 access record value. Insert the page pointed by this leaf entry into the
257 page pool. And check its upper entries if need to be inserted into the page
277 UINT64 SubEntriesNum
;
280 UINT64
*ReleasePageAddress
;
290 ReleasePageAddress
= 0;
293 // First, find the leaf entry has the smallest access record value
295 Pml4
= (UINT64
*)(UINTN
)(AsmReadCr3 () & gPhyMask
);
296 for (Pml4Index
= 0; Pml4Index
< EFI_PAGE_SIZE
/ sizeof (*Pml4
); Pml4Index
++) {
297 if ((Pml4
[Pml4Index
] & IA32_PG_P
) == 0 || (Pml4
[Pml4Index
] & IA32_PG_PMNT
) != 0) {
299 // If the PML4 entry is not present or is masked, skip it
303 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[Pml4Index
] & gPhyMask
);
305 for (PdptIndex
= 0; PdptIndex
< EFI_PAGE_SIZE
/ sizeof (*Pdpt
); PdptIndex
++) {
306 if ((Pdpt
[PdptIndex
] & IA32_PG_P
) == 0 || (Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
308 // If the PDPT entry is not present or is masked, skip it
310 if ((Pdpt
[PdptIndex
] & IA32_PG_PMNT
) != 0) {
312 // If the PDPT entry is masked, we will ignore checking the PML4 entry
318 if ((Pdpt
[PdptIndex
] & IA32_PG_PS
) == 0) {
320 // It's not 1-GByte pages entry, it should be a PDPT entry,
321 // we will not check PML4 entry more
324 Pdt
= (UINT64
*)(UINTN
)(Pdpt
[PdptIndex
] & gPhyMask
);
326 for (PdtIndex
= 0; PdtIndex
< EFI_PAGE_SIZE
/ sizeof(*Pdt
); PdtIndex
++) {
327 if ((Pdt
[PdtIndex
] & IA32_PG_P
) == 0 || (Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
329 // If the PD entry is not present or is masked, skip it
331 if ((Pdt
[PdtIndex
] & IA32_PG_PMNT
) != 0) {
333 // If the PD entry is masked, we will not PDPT entry more
339 if ((Pdt
[PdtIndex
] & IA32_PG_PS
) == 0) {
341 // It's not 2 MByte page table entry, it should be PD entry
342 // we will find the entry has the smallest access record value
345 Acc
= GetAndUpdateAccNum (Pdt
+ PdtIndex
);
348 // If the PD entry has the smallest access record value,
349 // save the Page address to be released
355 ReleasePageAddress
= Pdt
+ PdtIndex
;
361 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
362 // it should only has the entries point to 2 MByte Pages
364 Acc
= GetAndUpdateAccNum (Pdpt
+ PdptIndex
);
367 // If the PDPT entry has the smallest access record value,
368 // save the Page address to be released
374 ReleasePageAddress
= Pdpt
+ PdptIndex
;
381 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
382 // it should only has the entries point to 1 GByte Pages
384 Acc
= GetAndUpdateAccNum (Pml4
+ Pml4Index
);
387 // If the PML4 entry has the smallest access record value,
388 // save the Page address to be released
394 ReleasePageAddress
= Pml4
+ Pml4Index
;
399 // Make sure one PML4/PDPT/PD entry is selected
401 ASSERT (MinAcc
!= (UINT64
)-1);
404 // Secondly, insert the page pointed by this entry into page pool and clear this entry
406 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(*ReleasePageAddress
& gPhyMask
));
407 *ReleasePageAddress
= 0;
410 // Lastly, check this entry's upper entries if need to be inserted into page pool
414 if (MinPdt
!= (UINTN
)-1) {
416 // If 4 KByte Page Table is released, check the PDPT entry
418 Pdpt
= (UINT64
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
);
419 SubEntriesNum
= GetSubEntriesNum(Pdpt
+ MinPdpt
);
420 if (SubEntriesNum
== 0) {
422 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
423 // clear the Page directory entry
425 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pdpt
[MinPdpt
] & gPhyMask
));
428 // Go on checking the PML4 table
434 // Update the sub-entries filed in PDPT entry and exit
436 SetSubEntriesNum (Pdpt
+ MinPdpt
, SubEntriesNum
- 1);
439 if (MinPdpt
!= (UINTN
)-1) {
441 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
443 SubEntriesNum
= GetSubEntriesNum (Pml4
+ MinPml4
);
444 if (SubEntriesNum
== 0) {
446 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
447 // clear the Page directory entry
449 InsertTailList (&mPagePool
, (LIST_ENTRY
*)(UINTN
)(Pml4
[MinPml4
] & gPhyMask
));
455 // Update the sub-entries filed in PML4 entry and exit
457 SetSubEntriesNum (Pml4
+ MinPml4
, SubEntriesNum
- 1);
461 // PLM4 table has been released before, exit it
468 Allocate free Page for PageFault handler use.
470 @return Page address.
480 if (IsListEmpty (&mPagePool
)) {
482 // If page pool is empty, reclaim the used pages and insert one into page pool
488 // Get one free page and remove it from page pool
490 RetVal
= (UINT64
)(UINTN
)mPagePool
.ForwardLink
;
491 RemoveEntryList (mPagePool
.ForwardLink
);
493 // Clean this page and return
495 ZeroMem ((VOID
*)(UINTN
)RetVal
, EFI_PAGE_SIZE
);
500 Page Fault handler for SMM use.
504 SmiDefaultPFHandler (
515 SMM_PAGE_SIZE_TYPE PageSize
;
522 // Set default SMM page attribute
524 PageSize
= SmmPageSize2M
;
529 Pml4
= (UINT64
*)(AsmReadCr3 () & gPhyMask
);
530 PFAddress
= AsmReadCr2 ();
532 Status
= GetPlatformPageTableAttribute (PFAddress
, &PageSize
, &NumOfPages
, &PageAttribute
);
534 // If platform not support page table attribute, set default SMM page attribute
536 if (Status
!= EFI_SUCCESS
) {
537 PageSize
= SmmPageSize2M
;
541 if (PageSize
>= MaxSmmPageSizeType
) {
542 PageSize
= SmmPageSize2M
;
544 if (NumOfPages
> 512) {
551 // BIT12 to BIT20 is Page Table index
557 // BIT21 to BIT29 is Page Directory index
560 PageAttribute
|= (UINTN
)IA32_PG_PS
;
563 if (!m1GPageTableSupport
) {
564 DEBUG ((EFI_D_ERROR
, "1-GByte pages is not supported!"));
568 // BIT30 to BIT38 is Page Directory Pointer Table index
571 PageAttribute
|= (UINTN
)IA32_PG_PS
;
578 // If execute-disable is enabled, set NX bit
581 PageAttribute
|= IA32_PG_NX
;
584 for (Index
= 0; Index
< NumOfPages
; Index
++) {
587 for (StartBit
= 39; StartBit
> EndBit
; StartBit
-= 9) {
588 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
589 if ((PageTable
[PTIndex
] & IA32_PG_P
) == 0) {
591 // If the entry is not present, allocate one page from page pool for it
593 PageTable
[PTIndex
] = AllocPage () | PAGE_ATTRIBUTE_BITS
;
596 // Save the upper entry address
598 UpperEntry
= PageTable
+ PTIndex
;
601 // BIT9 to BIT11 of entry is used to save access record,
602 // initialize value is 7
604 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_A
;
605 SetAccNum (PageTable
+ PTIndex
, 7);
606 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
609 PTIndex
= BitFieldRead64 (PFAddress
, StartBit
, StartBit
+ 8);
610 if ((PageTable
[PTIndex
] & IA32_PG_P
) != 0) {
612 // Check if the entry has already existed, this issue may occur when the different
613 // size page entries created under the same entry
615 DEBUG ((EFI_D_ERROR
, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable
, PTIndex
, PageTable
[PTIndex
]));
616 DEBUG ((EFI_D_ERROR
, "New page table overlapped with old page table!\n"));
620 // Fill the new entry
622 PageTable
[PTIndex
] = (PFAddress
& gPhyMask
& ~((1ull << EndBit
) - 1)) |
623 PageAttribute
| IA32_PG_A
| PAGE_ATTRIBUTE_BITS
;
624 if (UpperEntry
!= NULL
) {
625 SetSubEntriesNum (UpperEntry
, GetSubEntriesNum (UpperEntry
) + 1);
628 // Get the next page address if we need to create more page tables
630 PFAddress
+= (1ull << EndBit
);
635 ThePage Fault handler wrapper for SMM use.
637 @param InterruptType Defines the type of interrupt or exception that
638 occurred on the processor.This parameter is processor architecture specific.
639 @param SystemContext A pointer to the processor context when
640 the interrupt occurred on the processor.
645 IN EFI_EXCEPTION_TYPE InterruptType
,
646 IN EFI_SYSTEM_CONTEXT SystemContext
651 ASSERT (InterruptType
== EXCEPT_IA32_PAGE_FAULT
);
653 AcquireSpinLock (mPFLock
);
655 PFAddress
= AsmReadCr2 ();
658 // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
660 if ((FeaturePcdGet (PcdCpuSmmStackGuard
)) &&
661 (PFAddress
>= mCpuHotPlugData
.SmrrBase
) &&
662 (PFAddress
< (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
))) {
663 DEBUG ((EFI_D_ERROR
, "SMM stack overflow!\n"));
668 // If a page fault occurs in SMM range
670 if ((PFAddress
< mCpuHotPlugData
.SmrrBase
) ||
671 (PFAddress
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
672 if ((SystemContext
.SystemContextX64
->ExceptionData
& IA32_PF_EC_ID
) != 0) {
673 DEBUG ((EFI_D_ERROR
, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress
));
675 DumpModuleInfoByIp (*(UINTN
*)(UINTN
)SystemContext
.SystemContextX64
->Rsp
);
681 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
682 SmmProfilePFHandler (
683 SystemContext
.SystemContextX64
->Rip
,
684 SystemContext
.SystemContextX64
->ExceptionData
687 SmiDefaultPFHandler ();
690 ReleaseSpinLock (mPFLock
);