1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 #define PAGE_TABLE_PAGES 8
14 #define ACC_MAX_BIT BIT3
15
16 extern UINTN mSmmShadowStackSize;
17
18 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
19 BOOLEAN m1GPageTableSupport = FALSE;
20 BOOLEAN mCpuSmmRestrictedMemoryAccess;
21 BOOLEAN m5LevelPagingNeeded;
22 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
23
24 /**
25 Disable CET.
26 **/
27 VOID
28 EFIAPI
29 DisableCet (
30 VOID
31 );
32
33 /**
34 Enable CET.
35 **/
36 VOID
37 EFIAPI
38 EnableCet (
39 VOID
40 );
41
42 /**
43 Check if 1-GByte pages are supported by the processor or not.
44
45 @retval TRUE 1-GByte pages are supported.
46 @retval FALSE 1-GByte pages are not supported.
47
48 **/
49 BOOLEAN
50 Is1GPageSupport (
51 VOID
52 )
53 {
54 UINT32 RegEax;
55 UINT32 RegEdx;
56
57 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
58 if (RegEax >= 0x80000001) {
59 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
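//
// CPUID.80000001H:EDX[26] is the Page1GB feature flag; when it is set, 1-GByte pages are supported.
//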
60 if ((RegEdx & BIT26) != 0) {
61 return TRUE;
62 }
63 }
64 return FALSE;
65 }
66
67 /**
68 The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16] is set) and
69 the max physical address width is larger than 48 bits. Because 4-level paging can address
70 physical addresses up to 2^48 - 1, there is no need to enable 5-level paging
71 when the max physical address width is <= 48.
72
73 @retval TRUE 5-level paging enabling is needed.
74 @retval FALSE 5-level paging enabling is not needed.
75 **/
76 BOOLEAN
77 Is5LevelPagingNeeded (
78 VOID
79 )
80 {
81 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
82 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
83 UINT32 MaxExtendedFunctionId;
84
85 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
86 if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
87 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
88 } else {
89 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
90 }
91 AsmCpuidEx (
92 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
93 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
94 NULL, NULL, &ExtFeatureEcx.Uint32, NULL
95 );
96 DEBUG ((
97 DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
98 VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
99 ));
100
101 if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
102 ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
103 return TRUE;
104 } else {
105 return FALSE;
106 }
107 }
108
109 /**
110 Get page table base address and the depth of the page table.
111
112 @param[out] Base Page table base address.
113 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
114 **/
115 VOID
116 GetPageTable (
117 OUT UINTN *Base,
118 OUT BOOLEAN *FiveLevels OPTIONAL
119 )
120 {
121 IA32_CR4 Cr4;
122
123 if (mInternalCr3 == 0) {
124 *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
125 if (FiveLevels != NULL) {
126 Cr4.UintN = AsmReadCr4 ();
127 *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
128 }
129 return;
130 }
131
132 *Base = mInternalCr3;
133 if (FiveLevels != NULL) {
134 *FiveLevels = m5LevelPagingNeeded;
135 }
136 }
137
138 /**
139 Set sub-entries number in entry.
140
141 @param[in, out] Entry Pointer to entry
142 @param[in] SubEntryNum Sub-entries number based on 0:
143 0 means there is 1 sub-entry under this entry
144 0x1ff means there are 512 sub-entries under this entry
145
146 **/
147 VOID
148 SetSubEntriesNum (
149 IN OUT UINT64 *Entry,
150 IN UINT64 SubEntryNum
151 )
152 {
153 //
154 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
155 //
156 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
157 }
158
159 /**
160 Return sub-entries number in entry.
161
162 @param[in] Entry Pointer to entry
163
164 @return Sub-entries number based on 0:
165 0 means there is 1 sub-entry under this entry
166 0x1ff means there are 512 sub-entries under this entry
167 **/
168 UINT64
169 GetSubEntriesNum (
170 IN UINT64 *Entry
171 )
172 {
173 //
174 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
175 //
176 return BitFieldRead64 (*Entry, 52, 60);
177 }
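//
// Note on the software-defined fields this driver keeps in reserved bits of a
// page table entry:
//   BIT9  - BIT11 : access record, maintained by SetAccNum ()/GetAccNum () for ReclaimPages ()
//   BIT52 - BIT60 : sub-entries count, maintained by SetSubEntriesNum ()/GetSubEntriesNum ()
// Entries marked with IA32_PG_PMNT are additionally excluded from reclaim.
//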
178
179 /**
180 Calculate the maximum supported physical address width.
181
182 @return The maximum supported physical address width, in bits.
183 **/
184 UINT8
185 CalculateMaximumSupportAddress (
186 VOID
187 )
188 {
189 UINT32 RegEax;
190 UINT8 PhysicalAddressBits;
191 VOID *Hob;
192
193 //
194 // Get physical address bits supported.
195 //
196 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
197 if (Hob != NULL) {
198 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
199 } else {
200 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
201 if (RegEax >= 0x80000008) {
202 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
203 PhysicalAddressBits = (UINT8) RegEax;
204 } else {
205 PhysicalAddressBits = 36;
206 }
207 }
208 return PhysicalAddressBits;
209 }
210
211 /**
212 Set static page table.
213
214 @param[in] PageTable Address of page table.
215 @param[in] PhysicalAddressBits The maximum physical address bits supported.
216 **/
217 VOID
218 SetStaticPageTable (
219 IN UINTN PageTable,
220 IN UINT8 PhysicalAddressBits
221 )
222 {
223 UINT64 PageAddress;
224 UINTN NumberOfPml5EntriesNeeded;
225 UINTN NumberOfPml4EntriesNeeded;
226 UINTN NumberOfPdpEntriesNeeded;
227 UINTN IndexOfPml5Entries;
228 UINTN IndexOfPml4Entries;
229 UINTN IndexOfPdpEntries;
230 UINTN IndexOfPageDirectoryEntries;
231 UINT64 *PageMapLevel5Entry;
232 UINT64 *PageMapLevel4Entry;
233 UINT64 *PageMap;
234 UINT64 *PageDirectoryPointerEntry;
235 UINT64 *PageDirectory1GEntry;
236 UINT64 *PageDirectoryEntry;
237
238 //
239 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
240 // when 5-Level Paging is disabled.
241 //
242 ASSERT (PhysicalAddressBits <= 52);
243 if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
244 PhysicalAddressBits = 48;
245 }
246
247 NumberOfPml5EntriesNeeded = 1;
248 if (PhysicalAddressBits > 48) {
249 NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48);
250 PhysicalAddressBits = 48;
251 }
252
253 NumberOfPml4EntriesNeeded = 1;
254 if (PhysicalAddressBits > 39) {
255 NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39);
256 PhysicalAddressBits = 39;
257 }
258
259 NumberOfPdpEntriesNeeded = 1;
260 ASSERT (PhysicalAddressBits > 30);
261 NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30);
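//
// Worked example: with PhysicalAddressBits == 48 under 4-level paging,
// NumberOfPml5EntriesNeeded = 1, NumberOfPml4EntriesNeeded = 1 << (48 - 39) = 512,
// and NumberOfPdpEntriesNeeded = 1 << (39 - 30) = 512, so the static page table
// covers the full 2^48-byte physical address space.
//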
262
263 //
264 // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
265 //
266 PageMap = (VOID *) PageTable;
267
268 PageMapLevel4Entry = PageMap;
269 PageMapLevel5Entry = NULL;
270 if (m5LevelPagingNeeded) {
271 //
272 // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
273 //
274 PageMapLevel5Entry = PageMap;
275 }
276 PageAddress = 0;
277
278 for ( IndexOfPml5Entries = 0
279 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
280 ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
281 //
282 // Each PML5 entry points to a page of PML4 entries.
283 // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
284 // When 5-Level Paging is disabled, the allocation below happens only once.
285 //
286 if (m5LevelPagingNeeded) {
287 PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
288 if (PageMapLevel4Entry == NULL) {
289 PageMapLevel4Entry = AllocatePageTableMemory (1);
290 ASSERT(PageMapLevel4Entry != NULL);
291 ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
292
293 *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
294 }
295 }
296
297 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
298 //
299 // Each PML4 entry points to a page of Page Directory Pointer entries.
300 //
301 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
302 if (PageDirectoryPointerEntry == NULL) {
303 PageDirectoryPointerEntry = AllocatePageTableMemory (1);
304 ASSERT(PageDirectoryPointerEntry != NULL);
305 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
306
307 *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
308 }
309
310 if (m1GPageTableSupport) {
311 PageDirectory1GEntry = PageDirectoryPointerEntry;
312 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
313 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
314 //
315 // Skip the < 4G entries
316 //
317 continue;
318 }
319 //
320 // Fill in the Page Directory entries
321 //
322 *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
323 }
324 } else {
325 PageAddress = BASE_4GB;
326 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
327 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
328 //
329 // Skip the < 4G entries
330 //
331 continue;
332 }
333 //
334 // Each Page Directory Pointer entry points to a page of Page Directory entries.
335 // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
336 //
337 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
338 if (PageDirectoryEntry == NULL) {
339 PageDirectoryEntry = AllocatePageTableMemory (1);
340 ASSERT(PageDirectoryEntry != NULL);
341 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
342
343 //
344 // Fill in the Page Directory Pointer entry
345 //
346 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
347 }
348
349 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
350 //
351 // Fill in the Page Directory entries
352 //
353 *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
354 }
355 }
356 }
357 }
358 }
359 }
360
361 /**
362 Create PageTable for SMM use.
363
364 @return The address of the page table root (PML4 or PML5) to be loaded into CR3.
365
366 **/
367 UINT32
368 SmmInitPageTable (
369 VOID
370 )
371 {
372 EFI_PHYSICAL_ADDRESS Pages;
373 UINT64 *PTEntry;
374 LIST_ENTRY *FreePage;
375 UINTN Index;
376 UINTN PageFaultHandlerHookAddress;
377 IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
378 EFI_STATUS Status;
379 UINT64 *Pml4Entry;
380 UINT64 *Pml5Entry;
381
382 //
383 // Initialize spin lock
384 //
385 InitializeSpinLock (mPFLock);
386
387 mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
388 m1GPageTableSupport = Is1GPageSupport ();
389 m5LevelPagingNeeded = Is5LevelPagingNeeded ();
390 mPhysicalAddressBits = CalculateMaximumSupportAddress ();
391 PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
392 DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
393 DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
394 DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
395 DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
396 //
397 // Generate PAE page table for the first 4GB memory space
398 //
399 Pages = Gen4GPageTable (FALSE);
400
401 //
402 // Set IA32_PG_PMNT bit to mask this entry
403 //
404 PTEntry = (UINT64*)(UINTN)Pages;
405 for (Index = 0; Index < 4; Index++) {
406 PTEntry[Index] |= IA32_PG_PMNT;
407 }
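//
// Marking these entries with IA32_PG_PMNT keeps the page tables mapping the first
// 4 GB resident: ReclaimPages () skips any entry with this bit set, so these entries
// are never evicted to satisfy an on-demand page fault.
//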
408
409 //
410 // Fill Page-Table-Level4 (PML4) entry
411 //
412 Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
413 ASSERT (Pml4Entry != NULL);
414 *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
415 ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
416
417 //
418 // Set sub-entries number
419 //
420 SetSubEntriesNum (Pml4Entry, 3);
421 PTEntry = Pml4Entry;
422
423 if (m5LevelPagingNeeded) {
424 //
425 // Fill PML5 entry
426 //
427 Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
428 ASSERT (Pml5Entry != NULL);
429 *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
430 ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
431 //
432 // Set sub-entries number
433 //
434 SetSubEntriesNum (Pml5Entry, 1);
435 PTEntry = Pml5Entry;
436 }
437
438 if (mCpuSmmRestrictedMemoryAccess) {
439 //
440 // When access to non-SMRAM memory is restricted, create page table
441 // that covers all memory space.
442 //
443 SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
444 } else {
445 //
446 // Add pages to page pool
447 //
448 FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
449 ASSERT (FreePage != NULL);
450 for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
451 InsertTailList (&mPagePool, FreePage);
452 FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
453 }
454 }
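//
// These PAGE_TABLE_PAGES (8) pages form the pool that AllocPage () draws from when
// SmiDefaultPFHandler () builds page table entries on demand; when the pool is empty,
// ReclaimPages () evicts the entry with the smallest access record to refill it.
//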
455
456 if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
457 HEAP_GUARD_NONSTOP_MODE ||
458 NULL_DETECTION_NONSTOP_MODE) {
459 //
460 // Set own Page Fault entry instead of the default one, because SMM Profile
461 // feature depends on IRET instruction to do Single Step
462 //
463 PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
464 IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
465 IdtEntry += EXCEPT_IA32_PAGE_FAULT;
466 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
467 IdtEntry->Bits.Reserved_0 = 0;
468 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
469 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
470 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
471 IdtEntry->Bits.Reserved_1 = 0;
472 } else {
473 //
474 // Register Smm Page Fault Handler
475 //
476 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
477 ASSERT_EFI_ERROR (Status);
478 }
479
480 //
481 // Additional SMM IDT initialization for SMM stack guard
482 //
483 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
484 DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
485 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
486 }
487
488 //
489 // Additional SMM IDT initialization for SMM CET shadow stack
490 //
491 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
492 DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
493 InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
494 InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
495 }
496
497 //
498 // Return the address of PML4/PML5 (to set CR3)
499 //
500 return (UINT32)(UINTN)PTEntry;
501 }
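//
// Illustrative sketch only (not code from this module): the comments above note the
// return value is meant for CR3, so a caller would conceptually do:
//
//   UINT32  Cr3;
//   Cr3 = SmmInitPageTable ();
//   AsmWriteCr3 (Cr3);   // in this driver, the SMI entry code is what actually loads CR3
//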
502
503 /**
504 Set access record in entry.
505
506 @param[in, out] Entry Pointer to entry
507 @param[in] Acc Access record value
508
509 **/
510 VOID
511 SetAccNum (
512 IN OUT UINT64 *Entry,
513 IN UINT64 Acc
514 )
515 {
516 //
517 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
518 //
519 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
520 }
521
522 /**
523 Return access record in entry.
524
525 @param[in] Entry Pointer to entry
526
527 @return Access record value.
528
529 **/
530 UINT64
531 GetAccNum (
532 IN UINT64 *Entry
533 )
534 {
535 //
536 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
537 //
538 return BitFieldRead64 (*Entry, 9, 11);
539 }
540
541 /**
542 Return and update the access record in entry.
543
544 @param[in, out] Entry Pointer to entry
545
546 @return Access record value.
547
548 **/
549 UINT64
550 GetAndUpdateAccNum (
551 IN OUT UINT64 *Entry
552 )
553 {
554 UINT64 Acc;
555
556 Acc = GetAccNum (Entry);
557 if ((*Entry & IA32_PG_A) != 0) {
558 //
559 // If this entry has been accessed, clear the access flag in the entry and reset the access record
560 // to the initial value 7; ACC_MAX_BIT is added to the returned value to make it larger than the others
561 //
562 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
563 SetAccNum (Entry, 0x7);
564 return (0x7 + ACC_MAX_BIT);
565 } else {
566 if (Acc != 0) {
567 //
568 // If the access record is not the smallest value 0, decrement it and update the access record field
569 //
570 SetAccNum (Entry, Acc - 1);
571 }
572 }
573 return Acc;
574 }
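//
// The 3-bit access record implements a simple aging policy for reclaim: an entry whose
// A flag is set is treated as hot, its record is reset to 7 and it reports
// 7 + ACC_MAX_BIT (= 15) so it outranks every cold entry; an entry whose A flag is
// clear reports its current record and then decays by one toward 0. ReclaimPages ()
// evicts the entry that reports the smallest value.
//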
575
576 /**
577 Reclaim free pages for PageFault handler.
578
579 Search the whole page table tree to find the leaf entry that has the smallest
580 access record value. Insert the page pointed to by this leaf entry into the
581 page pool, then check whether its upper-level entries also need to be inserted
582 into the page pool.
583
584 **/
585 VOID
586 ReclaimPages (
587 VOID
588 )
589 {
590 UINT64 Pml5Entry;
591 UINT64 *Pml5;
592 UINT64 *Pml4;
593 UINT64 *Pdpt;
594 UINT64 *Pdt;
595 UINTN Pml5Index;
596 UINTN Pml4Index;
597 UINTN PdptIndex;
598 UINTN PdtIndex;
599 UINTN MinPml5;
600 UINTN MinPml4;
601 UINTN MinPdpt;
602 UINTN MinPdt;
603 UINT64 MinAcc;
604 UINT64 Acc;
605 UINT64 SubEntriesNum;
606 BOOLEAN PML4EIgnore;
607 BOOLEAN PDPTEIgnore;
608 UINT64 *ReleasePageAddress;
609 IA32_CR4 Cr4;
610 BOOLEAN Enable5LevelPaging;
611 UINT64 PFAddress;
612 UINT64 PFAddressPml5Index;
613 UINT64 PFAddressPml4Index;
614 UINT64 PFAddressPdptIndex;
615 UINT64 PFAddressPdtIndex;
616
617 Pml4 = NULL;
618 Pdpt = NULL;
619 Pdt = NULL;
620 MinAcc = (UINT64)-1;
621 MinPml4 = (UINTN)-1;
622 MinPml5 = (UINTN)-1;
623 MinPdpt = (UINTN)-1;
624 MinPdt = (UINTN)-1;
625 Acc = 0;
626 ReleasePageAddress = 0;
627 PFAddress = AsmReadCr2 ();
628 PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
629 PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
630 PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
631 PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
632
633 Cr4.UintN = AsmReadCr4 ();
634 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
635 Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
636
637 if (!Enable5LevelPaging) {
638 //
639 // Create one fake PML5 entry for 4-Level Paging
640 // so that the page table parsing logic only handles 5-Level page structure.
641 //
642 Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
643 Pml5 = &Pml5Entry;
644 }
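//
// With 4-level paging, the single fake PML5 entry above holds the real page table base
// (from CR3) with the present bit set, so the walk below can treat both paging modes
// uniformly: the outer PML5 loop runs exactly once and descends straight into the real PML4.
//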
645
646 //
647 // First, find the leaf entry that has the smallest access record value
648 //
649 for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
650 if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
651 //
652 // If the PML5 entry is not present or is masked, skip it
653 //
654 continue;
655 }
656 Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
657 for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
658 if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
659 //
660 // If the PML4 entry is not present or is masked, skip it
661 //
662 continue;
663 }
664 Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
665 PML4EIgnore = FALSE;
666 for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
667 if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
668 //
669 // If the PDPT entry is not present or is masked, skip it
670 //
671 if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
672 //
673 // If the PDPT entry is masked, we will ignore checking the PML4 entry
674 //
675 PML4EIgnore = TRUE;
676 }
677 continue;
678 }
679 if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
680 //
681 // It's not a 1-GByte page entry, so it should be a PDPT entry;
682 // we will not check the PML4 entry any further
683 //
684 PML4EIgnore = TRUE;
685 Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
686 PDPTEIgnore = FALSE;
687 for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
688 if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
689 //
690 // If the PD entry is not present or is masked, skip it
691 //
692 if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
693 //
694 // If the PD entry is masked, we will not check the PDPT entry any further
695 //
696 PDPTEIgnore = TRUE;
697 }
698 continue;
699 }
700 if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
701 //
702 // It's not a 2-MByte page entry, so it should be a PD entry;
703 // we will look for the entry that has the smallest access record value
704 //
705 PDPTEIgnore = TRUE;
706 if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
707 Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
708 Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
709 if (Acc < MinAcc) {
710 //
711 // If the PD entry has the smallest access record value,
712 // save the Page address to be released
713 //
714 MinAcc = Acc;
715 MinPml5 = Pml5Index;
716 MinPml4 = Pml4Index;
717 MinPdpt = PdptIndex;
718 MinPdt = PdtIndex;
719 ReleasePageAddress = Pdt + PdtIndex;
720 }
721 }
722 }
723 }
724 if (!PDPTEIgnore) {
725 //
726 // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
727 // it should only have entries pointing to 2 MByte pages
728 //
729 if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
730 Pml5Index != PFAddressPml5Index) {
731 Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
732 if (Acc < MinAcc) {
733 //
734 // If the PDPT entry has the smallest access record value,
735 // save the Page address to be released
736 //
737 MinAcc = Acc;
738 MinPml5 = Pml5Index;
739 MinPml4 = Pml4Index;
740 MinPdpt = PdptIndex;
741 MinPdt = (UINTN)-1;
742 ReleasePageAddress = Pdpt + PdptIndex;
743 }
744 }
745 }
746 }
747 }
748 if (!PML4EIgnore) {
749 //
750 // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
751 // it should only have entries pointing to 1 GByte pages
752 //
753 if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
754 Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
755 if (Acc < MinAcc) {
756 //
757 // If the PML4 entry has the smallest access record value,
758 // save the Page address to be released
759 //
760 MinAcc = Acc;
761 MinPml5 = Pml5Index;
762 MinPml4 = Pml4Index;
763 MinPdpt = (UINTN)-1;
764 MinPdt = (UINTN)-1;
765 ReleasePageAddress = Pml4 + Pml4Index;
766 }
767 }
768 }
769 }
770 }
771 //
772 // Make sure one PML4/PDPT/PD entry is selected
773 //
774 ASSERT (MinAcc != (UINT64)-1);
775
776 //
777 // Secondly, insert the page pointed by this entry into page pool and clear this entry
778 //
779 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
780 *ReleasePageAddress = 0;
781
782 //
783 // Lastly, check whether this entry's upper-level entries also need to be
784 // inserted into the page pool
785 //
786 while (TRUE) {
787 if (MinPdt != (UINTN)-1) {
788 //
789 // If 4 KByte Page Table is released, check the PDPT entry
790 //
791 Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
792 Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
793 SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
794 if (SubEntriesNum == 0 &&
795 (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
796 //
797 // Release the empty Page Directory table if it no longer references any 4 KByte Page Table,
798 // and clear the PDPT entry that points to it
799 //
800 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
801 Pdpt[MinPdpt] = 0;
802 //
803 // Go on checking the PML4 table
804 //
805 MinPdt = (UINTN)-1;
806 continue;
807 }
808 //
809 // Update the sub-entries field in the PDPT entry and exit
810 //
811 SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
812 break;
813 }
814 if (MinPdpt != (UINTN)-1) {
815 //
816 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
817 //
818 SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
819 if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
820 //
821 // Release the Page Directory Pointer table if it has no remaining sub-entries,
822 // and clear the PML4 entry that points to it
823 //
824 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
825 Pml4[MinPml4] = 0;
826 MinPdpt = (UINTN)-1;
827 continue;
828 }
829 //
830 // Update the sub-entries field in the PML4 entry and exit
831 //
832 SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
833 break;
834 }
835 //
836 // A PML4 entry was released above; there is nothing higher to check, so exit the loop
837 //
838 break;
839 }
840 }
841
842 /**
843 Allocate free Page for PageFault handler use.
844
845 @return Page address.
846
847 **/
848 UINT64
849 AllocPage (
850 VOID
851 )
852 {
853 UINT64 RetVal;
854
855 if (IsListEmpty (&mPagePool)) {
856 //
857 // If page pool is empty, reclaim the used pages and insert one into page pool
858 //
859 ReclaimPages ();
860 }
861
862 //
863 // Get one free page and remove it from page pool
864 //
865 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
866 RemoveEntryList (mPagePool.ForwardLink);
867 //
868 // Clean this page and return
869 //
870 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
871 return RetVal;
872 }
873
874 /**
875 Page Fault handler for SMM use.
876
877 **/
878 VOID
879 SmiDefaultPFHandler (
880 VOID
881 )
882 {
883 UINT64 *PageTable;
884 UINT64 *PageTableTop;
885 UINT64 PFAddress;
886 UINTN StartBit;
887 UINTN EndBit;
888 UINT64 PTIndex;
889 UINTN Index;
890 SMM_PAGE_SIZE_TYPE PageSize;
891 UINTN NumOfPages;
892 UINTN PageAttribute;
893 EFI_STATUS Status;
894 UINT64 *UpperEntry;
895 BOOLEAN Enable5LevelPaging;
896 IA32_CR4 Cr4;
897
898 //
899 // Set default SMM page attribute
900 //
901 PageSize = SmmPageSize2M;
902 NumOfPages = 1;
903 PageAttribute = 0;
904
905 EndBit = 0;
906 PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
907 PFAddress = AsmReadCr2 ();
908
909 Cr4.UintN = AsmReadCr4 ();
910 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
911
912 Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
913 //
914 // If the platform does not supply page table attributes, fall back to the default SMM page attributes
915 //
916 if (Status != EFI_SUCCESS) {
917 PageSize = SmmPageSize2M;
918 NumOfPages = 1;
919 PageAttribute = 0;
920 }
921 if (PageSize >= MaxSmmPageSizeType) {
922 PageSize = SmmPageSize2M;
923 }
924 if (NumOfPages > 512) {
925 NumOfPages = 512;
926 }
927
928 switch (PageSize) {
929 case SmmPageSize4K:
930 //
931 // BIT12 to BIT20 is Page Table index
932 //
933 EndBit = 12;
934 break;
935 case SmmPageSize2M:
936 //
937 // BIT21 to BIT29 is Page Directory index
938 //
939 EndBit = 21;
940 PageAttribute |= (UINTN)IA32_PG_PS;
941 break;
942 case SmmPageSize1G:
943 if (!m1GPageTableSupport) {
944 DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
945 ASSERT (FALSE);
946 }
947 //
948 // BIT30 to BIT38 is Page Directory Pointer Table index
949 //
950 EndBit = 30;
951 PageAttribute |= (UINTN)IA32_PG_PS;
952 break;
953 default:
954 ASSERT (FALSE);
955 }
956
957 //
958 // If execute-disable is enabled, set NX bit
959 //
960 if (mXdEnabled) {
961 PageAttribute |= IA32_PG_NX;
962 }
963
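//
// Example of the index math below: with 5-level paging and a 2-MByte mapping
// (EndBit == 21), StartBit walks 48 -> 39 -> 30, reading the 9-bit indices
// PFAddress[56:48], PFAddress[47:39] and PFAddress[38:30] to descend
// PML5 -> PML4 -> PDPT; after the loop, the PD entry selected by PFAddress[29:21]
// is filled as the 2-MByte leaf.
//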
964 for (Index = 0; Index < NumOfPages; Index++) {
965 PageTable = PageTableTop;
966 UpperEntry = NULL;
967 for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
968 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
969 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
970 //
971 // If the entry is not present, allocate one page from page pool for it
972 //
973 PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
974 } else {
975 //
976 // Save the upper entry address
977 //
978 UpperEntry = PageTable + PTIndex;
979 }
980 //
981 // BIT9 to BIT11 of entry is used to save access record,
982 // initialize value is 7
983 //
984 PageTable[PTIndex] |= (UINT64)IA32_PG_A;
985 SetAccNum (PageTable + PTIndex, 7);
986 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
987 }
988
989 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
990 if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
991 //
992 // Check if the entry already exists; this issue may occur when page entries of
993 // different sizes are created under the same upper-level entry
994 //
995 DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
996 DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
997 ASSERT (FALSE);
998 }
999 //
1000 // Fill the new entry
1001 //
1002 PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
1003 PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
1004 if (UpperEntry != NULL) {
1005 SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
1006 }
1007 //
1008 // Get the next page address if we need to create more page tables
1009 //
1010 PFAddress += (1ull << EndBit);
1011 }
1012 }
1013
1014 /**
1015 The Page Fault handler wrapper for SMM use.
1016
1017 @param InterruptType Defines the type of interrupt or exception that
1018 occurred on the processor. This parameter is processor architecture specific.
1019 @param SystemContext A pointer to the processor context when
1020 the interrupt occurred on the processor.
1021 **/
1022 VOID
1023 EFIAPI
1024 SmiPFHandler (
1025 IN EFI_EXCEPTION_TYPE InterruptType,
1026 IN EFI_SYSTEM_CONTEXT SystemContext
1027 )
1028 {
1029 UINTN PFAddress;
1030 UINTN GuardPageAddress;
1031 UINTN ShadowStackGuardPageAddress;
1032 UINTN CpuIndex;
1033
1034 ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
1035
1036 AcquireSpinLock (mPFLock);
1037
1038 PFAddress = AsmReadCr2 ();
1039
1040 if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
1041 DumpCpuContext (InterruptType, SystemContext);
1042 DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
1043 CpuDeadLoop ();
1044 goto Exit;
1045 }
1046
1047 //
1048 // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack guard page,
1049 // or an SMM page protection violation.
1050 //
1051 if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
1052 (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
1053 DumpCpuContext (InterruptType, SystemContext);
1054 CpuIndex = GetCpuIndex ();
1055 GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
1056 ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
1057 if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
1058 (PFAddress >= GuardPageAddress) &&
1059 (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
1060 DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
1061 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
1062 (mSmmShadowStackSize > 0) &&
1063 (PFAddress >= ShadowStackGuardPageAddress) &&
1064 (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
1065 DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
1066 } else {
1067 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
1068 DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
1069 DEBUG_CODE (
1070 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
1071 );
1072 } else {
1073 DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
1074 DEBUG_CODE (
1075 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1076 );
1077 }
1078
1079 if (HEAP_GUARD_NONSTOP_MODE) {
1080 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
1081 goto Exit;
1082 }
1083 }
1084 CpuDeadLoop ();
1085 goto Exit;
1086 }
1087
1088 //
1089 // If a page fault occurs in non-SMRAM range.
1090 //
1091 if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
1092 (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
1093 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
1094 DumpCpuContext (InterruptType, SystemContext);
1095 DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
1096 DEBUG_CODE (
1097 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
1098 );
1099 CpuDeadLoop ();
1100 goto Exit;
1101 }
1102
1103 //
1104 // If NULL pointer was just accessed
1105 //
1106 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
1107 (PFAddress < EFI_PAGE_SIZE)) {
1108 DumpCpuContext (InterruptType, SystemContext);
1109 DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
1110 DEBUG_CODE (
1111 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1112 );
1113
1114 if (NULL_DETECTION_NONSTOP_MODE) {
1115 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
1116 goto Exit;
1117 }
1118
1119 CpuDeadLoop ();
1120 goto Exit;
1121 }
1122
1123 if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
1124 DumpCpuContext (InterruptType, SystemContext);
1125 DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
1126 DEBUG_CODE (
1127 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1128 );
1129 CpuDeadLoop ();
1130 goto Exit;
1131 }
1132 }
1133
1134 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1135 SmmProfilePFHandler (
1136 SystemContext.SystemContextX64->Rip,
1137 SystemContext.SystemContextX64->ExceptionData
1138 );
1139 } else {
1140 SmiDefaultPFHandler ();
1141 }
1142
1143 Exit:
1144 ReleaseSpinLock (mPFLock);
1145 }
1146
1147 /**
1148 This function sets memory attribute for page table.
1149 **/
1150 VOID
1151 SetPageTableAttributes (
1152 VOID
1153 )
1154 {
1155 UINTN Index2;
1156 UINTN Index3;
1157 UINTN Index4;
1158 UINTN Index5;
1159 UINT64 *L1PageTable;
1160 UINT64 *L2PageTable;
1161 UINT64 *L3PageTable;
1162 UINT64 *L4PageTable;
1163 UINT64 *L5PageTable;
1164 UINTN PageTableBase;
1165 BOOLEAN IsSplitted;
1166 BOOLEAN PageTableSplitted;
1167 BOOLEAN CetEnabled;
1168 BOOLEAN Enable5LevelPaging;
1169
1170 //
1171 // Don't mark page table memory as read-only if
1172 // - no restriction on access to non-SMRAM memory; or
1173 // - SMM heap guard feature enabled; or
1174 // BIT2: SMM page guard enabled
1175 // BIT3: SMM pool guard enabled
1176 // - SMM profile feature enabled
1177 //
1178 if (!mCpuSmmRestrictedMemoryAccess ||
1179 ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
1180 FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1181 //
1182 // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
1183 //
1184 ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
1185 (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));
1186
1187 //
1188 // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
1189 //
1190 ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
1191 return ;
1192 }
1193
1194 DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
1195
1196 //
1197 // Disable write protection, because we need to modify the page table itself.
1198 // We must be able to *write* page table memory in order to mark it *read only*.
1199 //
1200 CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
1201 if (CetEnabled) {
1202 //
1203 // CET must be disabled if WP is disabled.
1204 //
1205 DisableCet();
1206 }
1207 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
1208
1209 do {
1210 DEBUG ((DEBUG_INFO, "Start...\n"));
1211 PageTableSplitted = FALSE;
1212 L5PageTable = NULL;
1213
1214 GetPageTable (&PageTableBase, &Enable5LevelPaging);
1215
1216 if (Enable5LevelPaging) {
1217 L5PageTable = (UINT64 *)PageTableBase;
1218 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1219 PageTableSplitted = (PageTableSplitted || IsSplitted);
1220 }
1221
1222 for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
1223 if (Enable5LevelPaging) {
1224 L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1225 if (L4PageTable == NULL) {
1226 continue;
1227 }
1228 } else {
1229 L4PageTable = (UINT64 *)PageTableBase;
1230 }
1231 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1232 PageTableSplitted = (PageTableSplitted || IsSplitted);
1233
1234 for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
1235 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1236 if (L3PageTable == NULL) {
1237 continue;
1238 }
1239
1240 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1241 PageTableSplitted = (PageTableSplitted || IsSplitted);
1242
1243 for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
1244 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
1245 // 1G
1246 continue;
1247 }
1248 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1249 if (L2PageTable == NULL) {
1250 continue;
1251 }
1252
1253 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1254 PageTableSplitted = (PageTableSplitted || IsSplitted);
1255
1256 for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
1257 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
1258 // 2M
1259 continue;
1260 }
1261 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1262 if (L1PageTable == NULL) {
1263 continue;
1264 }
1265 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1266 PageTableSplitted = (PageTableSplitted || IsSplitted);
1267 }
1268 }
1269 }
1270 }
1271 } while (PageTableSplitted);
1272
1273 //
1274 // Enable write protection, after page table updated.
1275 //
1276 AsmWriteCr0 (AsmReadCr0() | CR0_WP);
1277 if (CetEnabled) {
1278 //
1279 // re-enable CET.
1280 //
1281 EnableCet();
1282 }
1283
1284 return ;
1285 }
1286
1287 /**
1288 This function reads CR2 register when on-demand paging is enabled.
1289
1290 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1291 **/
1292 VOID
1293 SaveCr2 (
1294 OUT UINTN *Cr2
1295 )
1296 {
1297 if (!mCpuSmmRestrictedMemoryAccess) {
1298 //
1299 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1300 //
1301 *Cr2 = AsmReadCr2 ();
1302 }
1303 }
1304
1305 /**
1306 This function restores CR2 register when on-demand paging is enabled.
1307
1308 @param[in] Cr2 Value to write into CR2 register.
1309 **/
1310 VOID
1311 RestoreCr2 (
1312 IN UINTN Cr2
1313 )
1314 {
1315 if (!mCpuSmmRestrictedMemoryAccess) {
1316 //
1317 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1318 //
1319 AsmWriteCr2 (Cr2);
1320 }
1321 }
1322
1323 /**
1324 Return whether access to non-SMRAM is restricted.
1325
1326 @retval TRUE Access to non-SMRAM is restricted.
1327 @retval FALSE Access to non-SMRAM is not restricted.
1328 **/
1329 BOOLEAN
1330 IsRestrictedMemoryAccess (
1331 VOID
1332 )
1333 {
1334 return mCpuSmmRestrictedMemoryAccess;
1335 }