/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16]
  is set) and the max physical address bits is bigger than 48. Because 4-level paging
  can address physical addresses up to 2^48 - 1, there is no need to enable 5-level
  paging when the max physical address bits is <= 48.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );
  DEBUG ((
    DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
    ));

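  //
  // 4-level paging covers a 48-bit linear address space: 4 levels x 9 index
  // bits per level + a 12-bit page offset = 48 bits. Identity-mapping any
  // physical address above that limit requires 5-level paging.
  //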
  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported physical address bits.

  @return the maximum supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

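  //
  // Example (illustrative): with mPhysicalAddressBits == 46 and 5-level paging
  // disabled, NumberOfPml5EntriesNeeded == 1, NumberOfPml4EntriesNeeded ==
  // 2^(46-39) == 128, and NumberOfPdpEntriesNeeded == 2^(39-30) == 512.
  //
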
  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the below allocation happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
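  //
  // (The sub-entries number is zero-based: 3 here covers the 4 PDPT entries
  // created by Gen4GPageTable () for the first 4 GB.)
  //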
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out]  Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in the entry and reset
    // the access record to the initial value 7; adding ACC_MAX_BIT makes the returned
    // value larger than that of any entry that was not accessed.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

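//
// Note: together, GetAndUpdateAccNum () and ReclaimPages () effectively implement an
// approximate least-recently-used policy for on-demand page-table pages: entries whose
// A bit was set since the last scan are aged back to the highest record (7, biased by
// ACC_MAX_BIT so they compare larger), while untouched entries decay toward 0, making
// the entry with the smallest record the best candidate to reclaim.
//
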
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. And check whether its upper entries need to be inserted into the
  page pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64   Pml5Entry;
  UINT64   *Pml5;
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pdt;
  UINTN    Pml5Index;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdtIndex;
  UINTN    MinPml5;
  UINTN    MinPml4;
  UINTN    MinPdpt;
  UINTN    MinPdt;
  UINT64   MinAcc;
  UINT64   Acc;
  UINT64   SubEntriesNum;
  BOOLEAN  PML4EIgnore;
  BOOLEAN  PDPTEIgnore;
  UINT64   *ReleasePageAddress;
  IA32_CR4 Cr4;
  BOOLEAN  Enable5LevelPaging;
  UINT64   PFAddress;
  UINT64   PFAddressPml5Index;
  UINT64   PFAddressPml4Index;
  UINT64   PFAddressPdptIndex;
  UINT64   PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a 4-KByte page table;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
            // it should only have entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into the
  // page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there are no more 4-KByte Page Table
        // entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte Page Table is released or a Page Directory table is released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory Pointer table if there are no more entries
        // in it, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

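  //
  // Walk the paging hierarchy for each page to map: StartBit selects the level
  // (bit 48 for PML5 when 5-level paging is on, bit 39 for PML4 otherwise), each
  // level consumes 9 index bits, and the walk stops at EndBit, where the leaf
  // entry for the requested page size is filled in.
  //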
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't mark page table memory as read-only if
  //  - access to non-SMRAM memory is not restricted; or
  //  - the SMM heap guard feature is enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - the SMM profile feature is enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself as read-only.
  // We have to be able to *write* page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

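  //
  // The loop below may split large mappings while marking page-table pages
  // read-only; splitting allocates new page-table pages that also need to be
  // marked, so the pass is repeated until no further split occurs.
  //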
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet();
  }

  return ;
}

/**
  This function reads CR2 register when on-demand paging is enabled.

  @param[out]  *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores CR2 register when on-demand paging is enabled.

  @param[in]  Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE  Access to non-SMRAM is restricted.
  @retval FALSE Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}