/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

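//
// Number of pages initially added to the free page pool (mPagePool) that
// backs on-demand page-table creation in SmmInitPageTable().
//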
#define PAGE_TABLE_PAGES 8
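//
// ACC_MAX_BIT (BIT3 = 8) is added to a just-refreshed access record so that
// it compares larger than any plain 3-bit record value (at most 7); see
// GetAndUpdateAccNum().
//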
#define ACC_MAX_BIT BIT3

extern UINTN mSmmShadowStackSize;

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32 RegEax;
  UINT32 RegEdx;

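  //
  // CPUID leaf 0x80000001 EDX[26] (Page1GB) reports 1-GByte page support;
  // check the maximum supported extended leaf first.
  //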
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging
  (CPUID[7,0].ECX.BIT[16] is set) and the maximum physical address width is
  larger than 48 bits. Because 4-level paging can address physical addresses
  up to 2^48 - 1, there is no need to enable 5-level paging when the maximum
  physical address width is <= 48.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );
  DEBUG ((
    DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
    ));

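  //
  // 4-level paging maps 4 * 9 + 12 = 48 bits of linear address.
  //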
  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Get page table base address and the depth of the page table.

  @param[out] Base        Page table base address.
  @param[out] FiveLevels  TRUE means 5-level paging. FALSE means 4-level paging.
**/
VOID
GetPageTable (
  OUT UINTN    *Base,
  OUT BOOLEAN  *FiveLevels OPTIONAL
  )
{
  IA32_CR4 Cr4;

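  //
  // mInternalCr3 is non-zero while this module operates on a page table that
  // is not currently installed in CR3; otherwise read the live CR3/CR4.
  //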
  if (mInternalCr3 == 0) {
    *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    if (FiveLevels != NULL) {
      Cr4.UintN   = AsmReadCr4 ();
      *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
    }
    return;
  }

  *Base = mInternalCr3;
  if (FiveLevels != NULL) {
    *FiveLevels = m5LevelPagingNeeded;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported physical address bits.

  @return The maximum supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
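    //
    // CPUID leaf 0x80000008 EAX[7:0] reports the physical address width;
    // fall back to 36 bits when the leaf is not available.
    //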
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable            Address of page table.
  @param[in] PhysicalAddressBits  The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the below allocation happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
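            // (they are already mapped by the 4-GByte page table created in SmmInitPageTable)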
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4 or PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
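  // (count is zero-based: 3 means the four PDPT entries that map the first 4 GB)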
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64 Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; ACC_MAX_BIT is added to the
    // returned value to make it larger than any plain access record.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1
      // and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

/**
  Reclaim free pages for the page-fault handler.

  Search the whole page-table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries also need to be inserted
  into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it should be a PDPT entry;
          // we will not check the PML4 entry any further
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte pages,
            // it should only have entries that point to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries that point to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be
  // inserted into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table is released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there is no more 4-KByte page table entry;
        // clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte page table is released or a Page Directory table is released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory Pointer table if there is no more 1-GByte page entry;
        // clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64 RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
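    //
    // Walk down the paging hierarchy 9 bits at a time, from the top-level
    // index (PML5 or PML4) down to the level selected by EndBit, allocating
    // any missing intermediate page-table pages from the page pool.
    //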
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page
      // entries of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is
                        processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in an SMM stack/shadow stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINTN    Index5;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  UINT64   *L5PageTable;
  UINTN    PageTableBase;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  CetEnabled;
  BOOLEAN  Enable5LevelPaging;

  //
  // Don't mark page table memory as read-only if
  // - there is no restriction on access to non-SMRAM memory; or
  // - the SMM heap guard feature is enabled; or
  //   BIT2: SMM page guard enabled
  //   BIT3: SMM pool guard enabled
  // - the SMM profile feature is enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection because we need to mark the page table itself
  // read-only: we must be able to *write* page table memory in order to mark
  // it *read-only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

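  //
  // Marking page-table pages read-only may split large pages and thereby
  // allocate new page-table pages; repeat until a full pass causes no split,
  // so that newly allocated page-table pages get protected as well.
  //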
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable       = NULL;

    GetPageTable (&PageTableBase, &Enable5LevelPaging);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof (UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}