UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES 8
#define ACC_MAX_BIT      BIT3

LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE;
BOOLEAN mCpuSmmStaticPageTable;
BOOLEAN m5LevelPagingSupport;
X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE  1-GByte pages are supported.
  @retval FALSE 1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32 RegEax;
  UINT32 RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

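//
// Note: the check above uses CPUID leaf 80000001h; EDX bit 26 (Page1GB) is the
// architectural flag that reports 1-GByte page support.
//
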
/**
  Check if 5-level paging is supported by the processor or not.

  @retval TRUE  5-level paging is supported.
  @retval FALSE 5-level paging is not supported.

**/
BOOLEAN
Is5LevelPagingSupport (
  VOID
  )
{
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &EcxFlags.Uint32,
    NULL
    );
  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64 *Entry,
  IN UINT64 SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64 *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

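//
// Bookkeeping note: this driver stores its own metadata in otherwise-unused
// page-table entry bits: the sub-entry count in BIT52..BIT60 (above) and the
// access record in BIT9..BIT11 (see SetAccNum()/GetAccNum() below). Both are
// consumed by ReclaimPages() when the on-demand page pool runs dry.
//
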
/**
  Calculate the maximum supported physical address.

  @return the maximum supported physical address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32 RegEax;
  UINT8 PhysicalAddressBits;
  VOID *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

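//
// Note: the CPU HOB's SizeOfMemorySpace field, when present, already reports
// the platform's physical address width; otherwise CPUID leaf 80000008h
// (EAX bits 7:0) is queried, with 36 bits as the conservative fallback.
//
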
/**
  Set static page table.

  @param[in] PageTable Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN PageTable
  )
{
  UINT64 PageAddress;
  UINTN NumberOfPml5EntriesNeeded;
  UINTN NumberOfPml4EntriesNeeded;
  UINTN NumberOfPdpEntriesNeeded;
  UINTN IndexOfPml5Entries;
  UINTN IndexOfPml4Entries;
  UINTN IndexOfPdpEntries;
  UINTN IndexOfPageDirectoryEntries;
  UINT64 *PageMapLevel5Entry;
  UINT64 *PageMapLevel4Entry;
  UINT64 *PageMap;
  UINT64 *PageDirectoryPointerEntry;
  UINT64 *PageDirectory1GEntry;
  UINT64 *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingSupport) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

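//
// Note: SetStaticPageTable() intentionally skips the entries that map the
// first 4 GB; that range is instead covered by the Gen4GPageTable() mapping
// installed in SmmInitPageTable(), whose entries are masked with IA32_PG_PMNT
// so the reclaim logic never touches them.
//
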
/**
  Create PageTable for SMM use.

  @return The address of PML4 or PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS Pages;
  UINT64 *PTEntry;
  LIST_ENTRY *FreePage;
  UINTN Index;
  UINTN PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
  EFI_STATUS Status;
  UINT64 *Pml4Entry;
  UINT64 *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  m5LevelPagingSupport = Is5LevelPagingSupport ();
  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingSupport) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

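//
// Note: the CR3 value is returned as a UINT32. The page-table pages come from
// AllocatePageTableMemory(), which is expected to allocate from SMRAM below
// 4 GB, so the truncation is assumed to be safe here.
//
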
/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64 *Entry,
  IN UINT64 Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64 *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64 *Entry
  )
{
  UINT64 Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; ACC_MAX_BIT is added to make the
    // returned value larger than that of any entry that was not accessed.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and update
      // the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

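//
// Aging note: the access record implements a simple pseudo-LRU. Entries whose
// IA32_PG_A bit is set are refreshed to 7 and reported as 7 + ACC_MAX_BIT so
// they are never the minimum in the current scan; untouched entries decay by
// one per scan, so ReclaimPages() converges on the least recently used page.
//
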
/**
  Reclaim free pages for PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper-level entries need to be inserted
  into the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64 Pml5Entry;
  UINT64 *Pml5;
  UINT64 *Pml4;
  UINT64 *Pdpt;
  UINT64 *Pdt;
  UINTN Pml5Index;
  UINTN Pml4Index;
  UINTN PdptIndex;
  UINTN PdtIndex;
  UINTN MinPml5;
  UINTN MinPml4;
  UINTN MinPdpt;
  UINTN MinPdt;
  UINT64 MinAcc;
  UINT64 Acc;
  UINT64 SubEntriesNum;
  BOOLEAN PML4EIgnore;
  BOOLEAN PDPTEIgnore;
  UINT64 *ReleasePageAddress;
  IA32_CR4 Cr4;
  BOOLEAN Enable5LevelPaging;
  UINT64 PFAddress;
  UINT64 PFAddressPml5Index;
  UINT64 PFAddressPml4Index;
  UINT64 PFAddressPdptIndex;
  UINT64 PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it should be a PDPT entry;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there is no more 4-KByte Page Table entry in it,
        // and clear the PDPT entry that points to it
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table is released or a Page Directory table is released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory Pointer table if there is no more 1-GByte page entry in it,
        // and clear the PML4 entry that points to it
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

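//
// Reclamation order note: ReclaimPages() frees the least recently used leaf
// page first, then walks upward, releasing any parent table whose sub-entry
// count (BIT52..BIT60) drops to zero, while always keeping the tables that
// map the current page-fault address intact.
//
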
/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64 RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

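//
// Pool layout note: the free list links the spare pages through LIST_ENTRY
// headers stored in the pages themselves, so taking a page is just unlinking
// its ForwardLink and zeroing the 4-KByte page before it is reused.
//
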
/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64 *PageTable;
  UINT64 *PageTableTop;
  UINT64 PFAddress;
  UINTN StartBit;
  UINTN EndBit;
  UINT64 PTIndex;
  UINTN Index;
  SMM_PAGE_SIZE_TYPE PageSize;
  UINTN NumOfPages;
  UINTN PageAttribute;
  EFI_STATUS Status;
  UINT64 *UpperEntry;
  BOOLEAN Enable5LevelPaging;
  IA32_CR4 Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support the page table attribute, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries
      // of different sizes are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

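//
// Walk note: SmiDefaultPFHandler() descends the paging hierarchy 9 index bits
// at a time, starting at bit 48 (5-level paging) or bit 39 (4-level paging)
// and stopping at EndBit, which selects the mapping granularity: 12 for
// 4-KByte, 21 for 2-MByte, and 30 for 1-GByte pages.
//
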
/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE InterruptType,
  IN EFI_SYSTEM_CONTEXT SystemContext
  )
{
  UINTN PFAddress;
  UINTN GuardPageAddress;
  UINTN CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN Index2;
  UINTN Index3;
  UINTN Index4;
  UINTN Index5;
  UINT64 *L1PageTable;
  UINT64 *L2PageTable;
  UINT64 *L3PageTable;
  UINT64 *L4PageTable;
  UINT64 *L5PageTable;
  BOOLEAN IsSplitted;
  BOOLEAN PageTableSplitted;
  BOOLEAN CetEnabled;
  IA32_CR4 Cr4;
  BOOLEAN Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't do this if
  //  - there is no static page table; or
  //  - the SMM heap guard feature is enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - the SMM profile feature is enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as write protected.
  // We need to *write* the page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet();
  }

  return ;
}

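//
// Convergence note: marking a page-table page as EFI_MEMORY_RO can itself split
// a large page and allocate new page-table pages, so the do/while loop above
// repeats the whole walk until SmmSetMemoryAttributesEx() reports no further
// splits (PageTableSplitted stays FALSE).
//
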
/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN *Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}