/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index in the pre-allocated page pool.
//
UINTN                     mPFPageIndex;

//
// Pool of pages for dynamically creating page tables in the page fault handler.
//
UINT64                    mPFPageBuffer;

//
// Store the uplink (parent page-table entry) for each pool page being used.
//
UINT64                    *mPFPageUplink[MAX_PF_PAGE_COUNT];

/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (1);

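  //
  // One extra page was requested from Gen4GPageTable() above; this code relies on that
  // page sitting immediately below the returned page-table base and reuses it as the
  // PML4, whose first entry will point at the 4GB table just generated.
  //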
  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (1));
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Return the address of PML4 (to set CR3)
  //
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

  return;
}

/**
  Allocate the pages used to split 2MB pages into 4KB pages when a page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID  *Address;

  //
  // Pre-Allocate memory for page fault handler
  //
  Address = NULL;
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN) Address;
  mPFPageIndex = 0;
  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

  return;
}

/**
  Acquire one page from the pre-allocated pool for splitting a 2MB page into 4KB pages.

  @param  Uplink   The address of the Page-Directory entry that will point to the new page.

**/
VOID
AcquirePage (
  UINT64  *Uplink
  )
{
  UINT64  Address;

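  //
  // Pool pages are handed out round-robin: once all MAX_PF_PAGE_COUNT pages have been
  // used, the oldest page is recycled, and its previous parent entry is cleared below
  // so that no stale mapping keeps pointing at the reused page.
  //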
  //
  // Get the buffer
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);

  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }

  //
  // Link & Record the current uplink
  //
  *Uplink = Address | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}

/**
  Update the page table to map the memory correctly so that the instruction which
  caused the page fault can execute successfully. It also saves the original page
  table entry so that it can be restored in the single-step exception handler.

  @param  PageTable         Page table address.
  @param  PFAddress         The memory address which caused the page fault exception.
  @param  CpuIndex          The index of the processor.
  @param  ErrorCode         The error code of the exception.
  @param  IsValidPFAddress  The flag that indicates whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode,
  BOOLEAN       *IsValidPFAddress
  )
{
  UINTN             PTIndex;
  UINT64            Address;
  BOOLEAN           Nx;
  BOOLEAN           Existed;
  UINTN             Index;
  UINTN             PFIndex;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  //
  // The page fault address is above 4GB.
  //

  //
  // Check if the page fault address already exists in the page table.
  // If it exists but a page fault is still generated, there are two possible reasons:
  // 1. the present flag is cleared; 2. an instruction fetch in a protected memory range.
  //
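  //
  // Walk the 4-level paging hierarchy: bits 47:39 of the linear address index the PML4,
  // bits 38:30 the PDPT, bits 29:21 the PD and bits 20:12 the PT.
  //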
  Existed = FALSE;
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
    // PML4E
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PDPTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
      // PD
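      //
      // A PD entry with the PS bit set maps a 2MB page directly;
      // otherwise it points to a 4KB-page page table.
      //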
      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
        //
        // 2MB page
        //
        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
          Existed = TRUE;
        }
      } else {
        //
        // 4KB page
        //
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
        if (PageTable != NULL) {
          //
          // When there is already a valid entry mapping the 4KB page, there is no need to create a new 2MB mapping.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {

    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If the page fault address above 4GB is in a protected range but still causes a page
      // fault exception, create a page entry for it, mark the entry present/read-write with
      // execution disabled, and do not save this access into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in page table for page fault address.
    //
    SmiDefaultPFHandler ();
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
    //
    // Check if the 2MB-page entry needs to be changed to 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
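      //
      // Split the 2MB mapping into 512 4KB entries. Each 4KB entry is checked against the
      // protected ranges: non-accessible pages get their present/read-write bits cleared,
      // and the XD bit is set when execution must be disabled and XD is supported.
      //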
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record the old entries with non-present status.
  // Old entries include the memory which the instruction is at and the memory which the instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }
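  //
  // The saved entry is restored by the single-step exception handler after the faulting
  // instruction completes, so the original (non-present) protection is re-applied.
  //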

  //
  // Set the present/read-write flags (and clear the XD flag if needed) so that the faulting
  // instruction can complete when it is re-executed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault is caused by an instruction fetch, clear the XD bit in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}

/**
  Clear TF in FLAGS.

  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT   SystemContext
  )
{
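  //
  // BIT8 of RFLAGS is the Trap Flag (TF); clearing it stops further single-step exceptions.
  //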
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}