/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"
//
// Current page index.
//
UINTN  mPFPageIndex;

//
// Pool for dynamically creating page table in page fault handler.
//
UINT64  mPFPageBuffer;

//
// Store the uplink information for each page being used.
//
UINT64  *mPFPageUplink[MAX_PF_PAGE_COUNT];
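//
// Note on the pool above: MAX_PF_PAGE_COUNT pages are pre-allocated and then
// handed out round-robin by AcquirePage() below. Before a page is reused, the
// uplink (the page-directory entry that points at it) recorded here is
// cleared, so a stale 2MB->4KB split simply faults again and is rebuilt.
//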
/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  Pages;
  UINT64                *PTEntry;
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
  //
  // Return the address of PML4 (to set CR3)
  //
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

  return;
}
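//
// Note: only the first PML4 entry is populated above, presumably because the
// S3 resume path only touches the first 4GB that Gen4GPageTable() maps; the
// rest of the PML4 page is zeroed so any other access faults instead of
// going through stale mappings. SmmS3Cr3 is a UINT32, so the PML4 page
// itself must sit below 4GB.
//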
/**
  Allocate pages for creating 4KB-pages based on 2MB-pages when a page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID  *Address;
  //
  // Pre-Allocate memory for page fault handler
  //
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN)Address;
  mPFPageIndex  = 0;
  ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
}
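//
// Note: this initializer is expected to run once during SMM profile setup,
// before the first page fault can be taken, since AcquirePage() below assumes
// the pool and the uplink bookkeeping are already zeroed.
//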
/**
  Allocate one page for creating a 4KB-page based on a 2MB-page.

  @param  Uplink   The address of the Page-Directory entry.

**/
VOID
AcquirePage (
  UINT64  *Uplink
  )
{
  UINT64  Address;
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);
  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }
  //
  // Link & Record the current uplink
  //
  *Uplink                     = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}
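//
// A caller in the page-fault path passes the address of the 2MB
// page-directory entry being split; AcquirePage() splices the fresh zeroed
// page under it, and the caller then fills in the 512 4KB entries (see
// RestorePageTableAbove4G() below).
//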
/**
  Update the page table to map the memory correctly, in order to make the instruction
  which caused the page fault execute successfully. It also saves the original page
  table entry, to be restored by the single-step exception handler.

  @param  PageTable           PageTable Address.
  @param  PFAddress           The memory address which caused the page fault exception.
  @param  CpuIndex            The index of the processor.
  @param  ErrorCode           The Error code of the exception.
  @param  IsValidPFAddress    The flag that indicates whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64   *PageTable,
  UINT64   PFAddress,
  UINTN    CpuIndex,
  UINTN    ErrorCode,
  BOOLEAN  *IsValidPFAddress
  )
{
  UINTN     PTIndex;
  UINT64    Address;
  BOOLEAN   Nx;
  BOOLEAN   Existed;
  UINTN     Index;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
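  //
  // With CR4.LA57 set, 5-level paging is active: the walk below starts from a
  // PML5 table and consumes one extra index (bits 56:48 of the address)
  // before the usual PML4 lookup.
  //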
  //
  // If the page fault address is above 4GB.
  //

  //
  // Check if the page fault address already exists in the page table.
  // If it exists in the page table but a page fault was still generated,
  // there are 2 possible reasons: 1. the present flag is set to 0; 2. an instruction fetch in a protected memory range.
  //
  Existed   = FALSE;
  PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex   = 0;
  if (Enable5LevelPaging) {
    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
  }
  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
    // PML5E
    if (Enable5LevelPaging) {
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PML4E
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
        // PDPTE
        PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
        // PD
        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
          //
          // 2MB page
          //
          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if ((Address & ~((1ull << 21) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1))) {
            Existed = TRUE;
          }
        } else {
          //
          // 4KB page
          //
          PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (PageTable != 0) {
            //
            // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
            //
            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
              Existed = TRUE;
            }
          }
        }
      }
    }
  }
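  //
  // At this point Existed is TRUE only if a present 2MB or 4KB entry already
  // maps the faulting address; the fault then came from a cleared present
  // flag or an instruction fetch in a protected range, and the existing
  // entry is patched below rather than created.
  //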
  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If a page fault address above 4GB is in a protected range but causes a page fault exception,
      // a page entry will be created for it, with the page table entry marked present/rw and execution-disable.
      // This access is not saved into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in the page table for the page fault address.
    //
    SmiDefaultPFHandler ();
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML5E
    if (Enable5LevelPaging) {
      PTIndex   = BitFieldRead64 (PFAddress, 48, 56);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }
    // PML4E
    PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
    //
    // Check if the 2MB-page entry needs to be changed to 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }

        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }

        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }

        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }

      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }
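  //
  // Note: the (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS) cast above sign-extends the
  // 32-bit complement to 64 bits, so the high attribute bits (including NX,
  // BIT63) are preserved while the present/rw bits are stripped.
  //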
  //
  // Record old entries with non-present status.
  // Old entries include the memory which the instruction is at and the memory which the instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex                                = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }
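  //
  // The value/pointer pairs saved above are consumed in the single-step
  // (debug) exception path, which restores each entry to its original
  // non-present/XD state after the faulting instruction completes, so the
  // next access to a monitored range faults again and is logged.
  //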
  //
  // Add present flag or clear XD flag to make the page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault is caused by an instruction fetch, clear the XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}
/**
  Clear TF in FLAGS.

  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}
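//
// BIT8 of RFLAGS is the Trap Flag (TF). Clearing it here ends the single-step
// that the page fault handler armed, after which the original page-table
// attributes saved in RestorePageTableAbove4G() can be restored.
//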