/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index in the pre-allocated page pool.
//
UINTN mPFPageIndex;

//
// Pool for dynamically creating page tables in the page fault handler.
//
UINT64 mPFPageBuffer;

//
// Store the uplink (parent page table entry) for each pool page being used.
//
UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
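
//
// Together these form a small round-robin pool: AcquirePage () hands pages
// out in order and, once all MAX_PF_PAGE_COUNT pages are in use, recycles
// the oldest page after detaching it from the entry recorded in
// mPFPageUplink.
//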

/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  Pages;
  UINT64                *PTEntry;

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
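  //
  // A single PML4 entry spans 512GB of linear address space, so entry 0 alone
  // covers the 0-4GB range mapped by Gen4GPageTable (); the remainder of the
  // PML4 page is zeroed (not present) above.
  //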

  //
  // Return the address of PML4 (to set CR3)
  //
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

  return;
}

/**
  Allocate pages used to split 2MB pages into 4KB pages when a page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID  *Address;

  //
  // Pre-allocate memory for the page fault handler
  //
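  // Allocation is done up front here because memory services are not safe to
  // call from inside the page fault handler itself.
  //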
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN)Address;
  mPFPageIndex  = 0;
  ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

  return;
}

/**
  Allocate one page from the pre-allocated pool for splitting a 2MB page into 4KB pages.

  @param Uplink  The address of the Page-Directory entry that will point to the new page.

**/
VOID
AcquirePage (
  UINT64  *Uplink
  )
{
  UINT64  Address;

  //
  // Get the buffer
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);
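  //
  // mPFPageIndex selects the least-recently handed-out page in the pool, so
  // once the pool has wrapped around this recycles the oldest page table.
  //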

  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) &&
      ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }

  //
  // Link & Record the current uplink
  //
  *Uplink                     = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}

/**
  Update the page table to map the memory correctly, so that the instruction
  which caused the page fault can execute successfully. It also saves the
  original page table entry so that it can be restored by the single-step
  exception handler.

  @param PageTable         The page table address.
  @param PFAddress         The memory address which caused the page fault exception.
  @param CpuIndex          The index of the processor.
  @param ErrorCode         The error code of the exception.
  @param IsValidPFAddress  The flag indicating whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64   *PageTable,
  UINT64   PFAddress,
  UINTN    CpuIndex,
  UINTN    ErrorCode,
  BOOLEAN  *IsValidPFAddress
  )
{
  UINTN    PTIndex;
  UINT64   Address;
  BOOLEAN  Nx;
  BOOLEAN  Existed;
  UINTN    Index;
  UINTN    PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // The page fault address is above 4GB.
  //

  //
  // Check whether the page fault address already exists in the page table.
  // If it exists but a page fault was still generated, there are two possible
  // reasons: 1. the present flag is cleared; 2. an instruction fetch occurred
  // in a protected memory range.
  //
  Existed   = FALSE;
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
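  //
  // Linear-address bit fields used for the 4-level page table walk below
  // (per the Intel SDM, 4-level paging):
  //   bits 47:39 - PML4 entry index
  //   bits 38:30 - page-directory-pointer entry index
  //   bits 29:21 - page-directory entry index
  //   bits 20:12 - page-table entry index
  //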
  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
      // PD
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (PageTable != NULL) {
          //
          // A valid entry already maps this 4KB page; there is no need to
          // create a new entry to map 2MB.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If a page fault address above 4GB lies in a protected range but still
      // causes a page fault exception, create a page entry for it below and
      // mark that entry present/read-write and execution-disabled.
      // This access is not saved into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in the page table for the page fault address.
    //
    SmiDefaultPFHandler ();
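    //
    // SmiDefaultPFHandler () installs a mapping with default attributes for
    // the faulting address; the walk below re-locates that entry so its
    // attributes can be tightened for protected ranges.
    //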
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
    //
    // Check whether the 2MB-page entry needs to be split into 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

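      //
      // AcquirePage () has replaced the 2MB entry with a link to a fresh page
      // table; the loop below rebuilds the same 2MB range as 512 4KB entries,
      // applying present/NX attributes per 4KB page via IsAddressValid ().
      //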
      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          //
          // Remember the 4KB entry covering PFAddress for the bookkeeping below.
          //
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove the present flag and the rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }
      //
      // Set the XD bit to 1.
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record the old entries with non-present status.
  // Old entries include the page the instruction is on and the pages the
  // instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }
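  //
  // These saved entries are restored in the single-step (debug) exception
  // handler after the faulting instruction completes, re-arming the SMM
  // profile protection.
  //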

  //
  // Set the present flag, or clear the XD flag, so that the faulting access
  // can succeed when it is re-executed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault was caused by an instruction fetch, clear the XD bit
    // in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }
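
  //
  // The page fault handler sets the Trap Flag before resuming, so the faulting
  // instruction executes exactly once before a debug exception fires and the
  // entries recorded above are restored.
  //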

  return;
}

/**
  Clear the Trap Flag (TF) in RFLAGS.

  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
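  //
  // BIT8 of RFLAGS is the Trap Flag (TF); clearing it ends the single-step
  // (debug) exception flow used by the SMM profile page fault handling.
  //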
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}