/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index.
//
UINTN  mPFPageIndex;

//
// Pool for dynamically creating page table in page fault handler.
//
UINT64  mPFPageBuffer;

//
// Store the uplink information for each page being used.
//
UINT64  *mPFPageUplink[MAX_PF_PAGE_COUNT];

/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  Pages;
  UINT64                *PTEntry;

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
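  //
  // Each PML4 entry maps 512GB of linear address space, so the 4GB table
  // returned by Gen4GPageTable () only needs the first entry; the remainder
  // of the PML4 page is zeroed below.
  //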
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Return the address of PML4 (to set CR3)
  //
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

  return;
}

/**
  Allocate pages used for creating 4KB page tables to split 2MB pages when a page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID  *Address;

  //
  // Pre-Allocate memory for page fault handler
  //
  Address = NULL;
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN)Address;
  mPFPageIndex  = 0;
  ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

  return;
}

/**
  Allocate one page from the pre-allocated pool for creating a 4KB page table
  to split a 2MB page.

  @param Uplink The address of the Page-Directory entry that will point to the new page.

**/
VOID
AcquirePage (
  UINT64  *Uplink
  )
{
  UINT64  Address;

  //
  // Get the buffer
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);

  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }

  //
  // Link & Record the current uplink
  //
  *Uplink                     = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

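  //
  // The pool is used as a circular buffer: when all MAX_PF_PAGE_COUNT pages
  // have been handed out, the oldest one is recycled, which is why any stale
  // uplink that still points at this page is cleared above.
  //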
  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}

/**
  Update the page table to map the memory correctly in order to make the
  instruction which caused the page fault execute successfully. It also saves
  the original page table entry so that it can be restored by the single-step
  exception handler.

  @param PageTable        PageTable Address.
  @param PFAddress        The memory address which caused the page fault exception.
  @param CpuIndex         The index of the processor.
  @param ErrorCode        The Error code of the exception.
  @param IsValidPFAddress The flag indicates whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64   *PageTable,
  UINT64   PFAddress,
  UINTN    CpuIndex,
  UINTN    ErrorCode,
  BOOLEAN  *IsValidPFAddress
  )
{
  UINTN     PTIndex;
  UINT64    Address;
  BOOLEAN   Nx;
  BOOLEAN   Existed;
  UINTN     Index;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  //
  // The page fault address is above 4GB.
  //

  //
  // Check if the page fault address already exists in the page table.
  // If it exists but a page fault is still generated, there are two possible
  // reasons: 1. the present flag is set to 0; 2. instruction fetch in a
  // protected memory range.
  //
  Existed   = FALSE;
  PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex   = 0;
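  //
  // Linear-address bit fields used as indexes at each paging level:
  // PML5E bits 48-56, PML4E bits 39-47, PDPTE bits 30-38, PDE bits 21-29,
  // PTE bits 12-20.
  //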
  if (Enable5LevelPaging) {
    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
  }

  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
    // PML5E
    if (Enable5LevelPaging) {
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }

    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PML4E
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
        // PDPTE
        PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
        // PD
        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
          //
          // 2MB page
          //
          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
            Existed = TRUE;
          }
        } else {
          //
          // 4KB page
          //
          PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (PageTable != NULL) {
            //
            // When there is a valid entry mapping a 4KB page, there is no need
            // to create a new entry to map 2MB.
            //
            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
              Existed = TRUE;
            }
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If a page fault address above 4GB is in a protected range but still
      // causes a page fault exception, create a page entry for it and mark the
      // entry as present/rw and execution-disable.
      // This access is not saved into SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in page table for page fault address.
    //
    SmiDefaultPFHandler ();
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML5E
    if (Enable5LevelPaging) {
      PTIndex   = BitFieldRead64 (PFAddress, 48, 56);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }

    // PML4E
    PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
    //
    // Check if the 2MB-page entry needs to be changed to 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
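      //
      // Populate all 512 PTEs so that the new 4KB-page table covers the same
      // 2MB region the original PDE mapped; entries for protected addresses
      // are marked not-present (and NX when supported) below.
      //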
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }

        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }

        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }

        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }

      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record old entries with non-present status.
  // Old entries include the memory where the instruction is located and the memory the instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex                                = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

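  //
  // The value/pointer pairs recorded above are consumed by the single-step
  // (debug exception) handler, which restores the original entry after the
  // faulting instruction has executed.
  //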
  //
  // Add present flag or clear XD flag to make page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If page fault is caused by instruction fetch, clear XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}

/**
  Clear TF in FLAGS.

  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
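  //
  // BIT8 is the Trap Flag (TF) bit in RFLAGS; clearing it stops the
  // single-step (debug) exception from being raised again.
  //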
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}