/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index.
//
UINTN                     mPFPageIndex;

//
// Pool for dynamically creating page table in page fault handler.
//
UINT64                    mPFPageBuffer;

//
// Store the uplink information for each page being used.
//
UINT64                    *mPFPageUplink[MAX_PF_PAGE_COUNT];

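//
// The pool above is created once by InitPagesForPFHandler () and handed out
// page by page by AcquirePage () when a 2MB mapping has to be split into 4KB
// pages inside the page fault handler; mPFPageUplink remembers which page
// directory entry currently points at each pool page.
//
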
/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Return the address of PML4 (to set CR3)
  //
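  // Note: SmmS3Cr3 is a UINT32 field; the PML4 page returned by
  // AllocatePageTableMemory () is assumed to reside below 4GB, so the cast
  // below does not truncate the address.
  //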
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

  return;
}

/**
  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID *Address;

  //
  // Pre-Allocate memory for page fault handler
  //
  Address = NULL;
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN) Address;
  mPFPageIndex = 0;
  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

  return;
}

/**
  Allocate one page for creating 4KB-page based on 2MB-page.

  @param  Uplink   The address of Page-Directory entry.

**/
VOID
AcquirePage (
  UINT64 *Uplink
  )
{
  UINT64 Address;

  //
  // Get the buffer
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);

  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }

  //
  // Link & Record the current uplink
  //
  *Uplink = Address | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

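  //
  // Advance to the next pool page; the index wraps, so once all
  // MAX_PF_PAGE_COUNT pages are in use the oldest page is recycled (its stale
  // uplink is cleared above before the page is relinked).
  //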
  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}

/**
  Update page table to map the memory correctly in order to make the instruction
  which caused the page fault execute successfully. It also saves the original
  page table entry so that it can be restored in the single-step exception.

  @param  PageTable         PageTable Address.
  @param  PFAddress         The memory address which caused page fault exception.
  @param  CpuIndex          The index of the processor.
  @param  ErrorCode         The Error code of exception.
  @param  IsValidPFAddress  The flag indicates if SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode,
  BOOLEAN       *IsValidPFAddress
  )
{
  UINTN         PTIndex;
  UINT64        Address;
  BOOLEAN       Nx;
  BOOLEAN       Existed;
  UINTN         Index;
  UINTN         PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // If the page fault address is above 4GB.
  //

  //
  // Check if the page fault address already exists in the page table.
  // If it exists but a page fault is still generated, there are 2 possible reasons:
  // 1. the present flag is set to 0; 2. instruction fetch in a protected memory range.
  //
  Existed = FALSE;
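  //
  // Walk the 4-level page table: PFAddress bits 47:39 index the PML4,
  // bits 38:30 the PDPT, bits 29:21 the PD, and bits 20:12 the PT.
  //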
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
      // PD
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if (PageTable != 0) {
          //
          // When there is a valid entry mapping a 4KB page, there is no need to create a new entry to map 2MB.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {

    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If the page fault address above 4GB is in a protected range but still causes a page fault exception,
      // create a page entry for this address and mark it as present/rw and execution-disable.
      // This access is not saved into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in the page table for the page fault address.
    //
    SmiDefaultPFHandler ();
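    //
    // SmiDefaultPFHandler () builds the missing paging structures for the
    // faulting address recorded in CR2, so CR3 and CR2 are re-read below to
    // locate the page directory entry it just created.
    //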
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
    //
    // Check if the 2MB-page entry needs to be changed to 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
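      //
      // Split the 2MB mapping: repoint the PD entry at a 4KB page table taken
      // from the pre-allocated pool, then populate all 512 4KB entries below.
      //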
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update the 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove the present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record old entries with non-present status
  // Old entries include the memory the instruction is at and the memory the instruction accesses.
  //
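  // These saved value/pointer pairs are used to restore the entries to their
  // non-present state once the faulting instruction has been single-stepped
  // (the single-step/debug exception handler performs the restore).
  //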
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

  //
  // Add present flag or clear XD flag to make page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault is caused by an instruction fetch, clear the XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}

/**
  Clear TF in FLAGS.

  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}