1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 #define PAGE_TABLE_PAGES 8
14 #define ACC_MAX_BIT BIT3
15
16 extern UINTN mSmmShadowStackSize;
17
18 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
19 BOOLEAN m1GPageTableSupport = FALSE;
20 BOOLEAN mCpuSmmRestrictedMemoryAccess;
21 BOOLEAN m5LevelPagingNeeded;
22 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
23
24 /**
25 Disable CET.
26 **/
27 VOID
28 EFIAPI
29 DisableCet (
30 VOID
31 );
32
33 /**
34 Enable CET.
35 **/
36 VOID
37 EFIAPI
38 EnableCet (
39 VOID
40 );
41
42 /**
43 Check if 1-GByte pages are supported by the processor or not.
44
45 @retval TRUE 1-GByte pages are supported.
46 @retval FALSE 1-GByte pages are not supported.
47
48 **/
49 BOOLEAN
50 Is1GPageSupport (
51 VOID
52 )
53 {
54 UINT32 RegEax;
55 UINT32 RegEdx;
56
57 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
58 if (RegEax >= 0x80000001) {
59 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
60 if ((RegEdx & BIT26) != 0) {
61 return TRUE;
62 }
63 }
64 return FALSE;
65 }
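//
// For reference: BIT26 of CPUID.80000001H:EDX is the "Page1GB" feature flag,
// and probing the maximum extended leaf first avoids reading an
// unimplemented leaf on older processors.
//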
66
67 /**
68 The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16] is set) and
69 the max physical address width is larger than 48 bits. Because 4-level paging can address
70 physical addresses up to 2^48 - 1, there is no need to enable 5-level paging
71 when the max physical address width is <= 48 bits.
72
73 @retval TRUE 5-level paging enabling is needed.
74 @retval FALSE 5-level paging enabling is not needed.
75 **/
76 BOOLEAN
77 Is5LevelPagingNeeded (
78 VOID
79 )
80 {
81 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
82 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
83 UINT32 MaxExtendedFunctionId;
84
85 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
86 if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
87 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
88 } else {
89 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
90 }
91 AsmCpuidEx (
92 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
93 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
94 NULL, NULL, &ExtFeatureEcx.Uint32, NULL
95 );
96 DEBUG ((
97 DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
98 VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
99 ));
100
101 if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
102 ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
103 return TRUE;
104 } else {
105 return FALSE;
106 }
107 }
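//
// For reference, the threshold 4 * 9 + 12 = 48 used above reflects the
// 4-level paging layout: each of the four table levels (PML4, PDPT, PD, PT)
// decodes 9 bits of the address and the 4-KByte page offset uses the
// remaining 12 bits, so a 4-level identity mapping can cover at most 2^48
// bytes. A CPU reporting, say, 52 physical address bits therefore needs
// 5-level paging for a full static mapping, while one reporting 46 does not.
//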
108
109 /**
110 Get page table base address and the depth of the page table.
111
112 @param[out] Base Page table base address.
113 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
114 **/
115 VOID
116 GetPageTable (
117 OUT UINTN *Base,
118 OUT BOOLEAN *FiveLevels OPTIONAL
119 )
120 {
121 IA32_CR4 Cr4;
122
123 if (mInternalCr3 == 0) {
124 *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
125 if (FiveLevels != NULL) {
126 Cr4.UintN = AsmReadCr4 ();
127 *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
128 }
129 return;
130 }
131
132 *Base = mInternalCr3;
133 if (FiveLevels != NULL) {
134 *FiveLevels = m5LevelPagingNeeded;
135 }
136 }
137
138 /**
139 Set sub-entries number in entry.
140
141 @param[in, out] Entry Pointer to entry
142 @param[in] SubEntryNum Sub-entries number based on 0:
143 0 means there is 1 sub-entry under this entry
144 0x1ff means there are 512 sub-entries under this entry
145
146 **/
147 VOID
148 SetSubEntriesNum (
149 IN OUT UINT64 *Entry,
150 IN UINT64 SubEntryNum
151 )
152 {
153 //
154 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
155 //
156 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
157 }
158
159 /**
160 Return sub-entries number in entry.
161
162 @param[in] Entry Pointer to entry
163
164 @return Sub-entries number based on 0:
165 0 means there is 1 sub-entry under this entry
166 0x1ff means there are 512 sub-entries under this entry
167 **/
168 UINT64
169 GetSubEntriesNum (
170 IN UINT64 *Entry
171 )
172 {
173 //
174 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
175 //
176 return BitFieldRead64 (*Entry, 52, 60);
177 }
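//
// As a worked example of the bookkeeping above: SetSubEntriesNum (&Entry, 3)
// stores 3 (meaning 4 sub-entries, since the count is 0-based) in
// BIT52..BIT60 without disturbing the address or attribute bits, and
// GetSubEntriesNum (&Entry) then returns 3. This is safe because
// BIT52..BIT60 are ignored by the hardware page walker.
//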
178
179 /**
180 Calculate the maximum supported physical address bits.
181
182 @return The number of physical address bits supported.
183 **/
184 UINT8
185 CalculateMaximumSupportAddress (
186 VOID
187 )
188 {
189 UINT32 RegEax;
190 UINT8 PhysicalAddressBits;
191 VOID *Hob;
192
193 //
194 // Get physical address bits supported.
195 //
196 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
197 if (Hob != NULL) {
198 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
199 } else {
200 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
201 if (RegEax >= 0x80000008) {
202 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
203 PhysicalAddressBits = (UINT8) RegEax;
204 } else {
205 PhysicalAddressBits = 36;
206 }
207 }
208 return PhysicalAddressBits;
209 }
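//
// For reference: CPUID leaf 0x80000008 returns the physical address width in
// EAX bits 7:0, which is why the (UINT8) cast above is sufficient. The
// EFI_HOB_TYPE_CPU HOB is preferred when present, and 36 bits is the
// conservative fallback for processors that do not implement that leaf.
//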
210
211 /**
212 Set static page table.
213
214 @param[in] PageTable Address of page table.
215 @param[in] PhysicalAddressBits The maximum physical address bits supported.
216 **/
217 VOID
218 SetStaticPageTable (
219 IN UINTN PageTable,
220 IN UINT8 PhysicalAddressBits
221 )
222 {
223 UINT64 PageAddress;
224 UINTN NumberOfPml5EntriesNeeded;
225 UINTN NumberOfPml4EntriesNeeded;
226 UINTN NumberOfPdpEntriesNeeded;
227 UINTN IndexOfPml5Entries;
228 UINTN IndexOfPml4Entries;
229 UINTN IndexOfPdpEntries;
230 UINTN IndexOfPageDirectoryEntries;
231 UINT64 *PageMapLevel5Entry;
232 UINT64 *PageMapLevel4Entry;
233 UINT64 *PageMap;
234 UINT64 *PageDirectoryPointerEntry;
235 UINT64 *PageDirectory1GEntry;
236 UINT64 *PageDirectoryEntry;
237
238 //
239 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
240 // when 5-Level Paging is disabled.
241 //
242 ASSERT (PhysicalAddressBits <= 52);
243 if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
244 PhysicalAddressBits = 48;
245 }
246
247 NumberOfPml5EntriesNeeded = 1;
248 if (PhysicalAddressBits > 48) {
249 NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48);
250 PhysicalAddressBits = 48;
251 }
252
253 NumberOfPml4EntriesNeeded = 1;
254 if (PhysicalAddressBits > 39) {
255 NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39);
256 PhysicalAddressBits = 39;
257 }
258
259 NumberOfPdpEntriesNeeded = 1;
260 ASSERT (PhysicalAddressBits > 30);
261 NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30);
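//
// As a worked example of the sizing above: with PhysicalAddressBits == 48 and
// 4-level paging, NumberOfPml5EntriesNeeded stays 1, NumberOfPml4EntriesNeeded
// becomes 2^(48 - 39) = 512 and NumberOfPdpEntriesNeeded becomes
// 2^(39 - 30) = 512, i.e. every PML4 and PDPT slot is populated. With
// PhysicalAddressBits == 39, only 1 PML4 entry and 512 PDPT entries are needed.
//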
262
263 //
264 // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
265 //
266 PageMap = (VOID *) PageTable;
267
268 PageMapLevel4Entry = PageMap;
269 PageMapLevel5Entry = NULL;
270 if (m5LevelPagingNeeded) {
271 //
272 // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
273 //
274 PageMapLevel5Entry = PageMap;
275 }
276 PageAddress = 0;
277
278 for ( IndexOfPml5Entries = 0
279 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
280 ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
281 //
282 // Each PML5 entry points to a page of PML4 entries.
283 // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
284 // When 5-Level Paging is disabled, the allocation below happens only once.
285 //
286 if (m5LevelPagingNeeded) {
287 PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
288 if (PageMapLevel4Entry == NULL) {
289 PageMapLevel4Entry = AllocatePageTableMemory (1);
290 ASSERT(PageMapLevel4Entry != NULL);
291 ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
292
293 *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
294 }
295 }
296
297 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
298 //
299 // Each PML4 entry points to a page of Page Directory Pointer entries.
300 //
301 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
302 if (PageDirectoryPointerEntry == NULL) {
303 PageDirectoryPointerEntry = AllocatePageTableMemory (1);
304 ASSERT(PageDirectoryPointerEntry != NULL);
305 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
306
307 *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
308 }
309
310 if (m1GPageTableSupport) {
311 PageDirectory1GEntry = PageDirectoryPointerEntry;
312 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
313 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
314 //
315 // Skip the < 4G entries
316 //
317 continue;
318 }
319 //
320 // Fill in the Page Directory entries
321 //
322 *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
323 }
324 } else {
325 PageAddress = BASE_4GB;
326 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
327 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
328 //
329 // Skip the < 4G entries
330 //
331 continue;
332 }
333 //
334 // Each Directory Pointer entry points to a page of Page Directory entries.
335 // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
336 //
337 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
338 if (PageDirectoryEntry == NULL) {
339 PageDirectoryEntry = AllocatePageTableMemory (1);
340 ASSERT(PageDirectoryEntry != NULL);
341 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
342
343 //
344 // Fill in a Page Directory Pointer Entry
345 //
346 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
347 }
348
349 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
350 //
351 // Fill in the Page Directory entries
352 //
353 *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
354 }
355 }
356 }
357 }
358 }
359 }
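//
// To put the allocation cost in perspective (illustrative numbers): with
// PhysicalAddressBits == 39 and 1-GByte pages supported, the static map
// above needs only one PDPT page whose 512 leaf entries each cover 1 GByte.
// Without 1-GByte page support, one Page Directory page is allocated per
// PDPT entry above 4 GB (508 pages, roughly 2 MBytes of page-table memory),
// each filled with 512 2-MByte leaf entries.
//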
360
361 /**
362 Create PageTable for SMM use.
363
364 @return The address of PML4/PML5 (to set CR3).
365
366 **/
367 UINT32
368 SmmInitPageTable (
369 VOID
370 )
371 {
372 EFI_PHYSICAL_ADDRESS Pages;
373 UINT64 *PTEntry;
374 LIST_ENTRY *FreePage;
375 UINTN Index;
376 UINTN PageFaultHandlerHookAddress;
377 IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
378 EFI_STATUS Status;
379 UINT64 *Pml4Entry;
380 UINT64 *Pml5Entry;
381
382 //
383 // Initialize spin lock
384 //
385 InitializeSpinLock (mPFLock);
386
387 mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
388 m1GPageTableSupport = Is1GPageSupport ();
389 m5LevelPagingNeeded = Is5LevelPagingNeeded ();
390 mPhysicalAddressBits = CalculateMaximumSupportAddress ();
391 PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
392 DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
393 DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
394 DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
395 DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
396 //
397 // Generate PAE page table for the first 4GB memory space
398 //
399 Pages = Gen4GPageTable (FALSE);
400
401 //
402 // Set IA32_PG_PMNT bit to mask this entry
403 //
404 PTEntry = (UINT64*)(UINTN)Pages;
405 for (Index = 0; Index < 4; Index++) {
406 PTEntry[Index] |= IA32_PG_PMNT;
407 }
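//
// The first four PDPT entries produced by Gen4GPageTable() cover the
// 0..4 GB range; IA32_PG_PMNT marks them permanent so ReclaimPages() skips
// them, and the sub-entries count of 3 recorded below (0-based, i.e. four
// children) keeps the bookkeeping consistent with those four entries.
//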
408
409 //
410 // Fill Page-Table-Level4 (PML4) entry
411 //
412 Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
413 ASSERT (Pml4Entry != NULL);
414 *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
415 ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
416
417 //
418 // Set sub-entries number
419 //
420 SetSubEntriesNum (Pml4Entry, 3);
421 PTEntry = Pml4Entry;
422
423 if (m5LevelPagingNeeded) {
424 //
425 // Fill PML5 entry
426 //
427 Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
428 ASSERT (Pml5Entry != NULL);
429 *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
430 ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
431 //
432 // Set sub-entries number
433 //
434 SetSubEntriesNum (Pml5Entry, 1);
435 PTEntry = Pml5Entry;
436 }
437
438 if (mCpuSmmRestrictedMemoryAccess) {
439 //
440 // When access to non-SMRAM memory is restricted, create page table
441 // that covers all memory space.
442 //
443 SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
444 } else {
445 //
446 // Add pages to page pool
447 //
448 FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
449 ASSERT (FreePage != NULL);
450 for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
451 InsertTailList (&mPagePool, FreePage);
452 FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
453 }
454 }
455
456 if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
457 HEAP_GUARD_NONSTOP_MODE ||
458 NULL_DETECTION_NONSTOP_MODE) {
459 //
460 // Set own Page Fault entry instead of the default one, because SMM Profile
461 // feature depends on IRET instruction to do Single Step
462 //
463 PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
464 IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
465 IdtEntry += EXCEPT_IA32_PAGE_FAULT;
466 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
467 IdtEntry->Bits.Reserved_0 = 0;
468 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
469 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
470 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
471 IdtEntry->Bits.Reserved_1 = 0;
472 } else {
473 //
474 // Register Smm Page Fault Handler
475 //
476 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
477 ASSERT_EFI_ERROR (Status);
478 }
479
480 //
481 // Additional SMM IDT initialization for SMM stack guard
482 //
483 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
484 InitializeIDTSmmStackGuard ();
485 }
486
487 //
488 // Return the address of PML4/PML5 (to set CR3)
489 //
490 return (UINT32)(UINTN)PTEntry;
491 }
492
493 /**
494 Set access record in entry.
495
496 @param[in, out] Entry Pointer to entry
497 @param[in] Acc Access record value
498
499 **/
500 VOID
501 SetAccNum (
502 IN OUT UINT64 *Entry,
503 IN UINT64 Acc
504 )
505 {
506 //
507 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
508 //
509 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
510 }
511
512 /**
513 Return access record in entry.
514
515 @param[in] Entry Pointer to entry
516
517 @return Access record value.
518
519 **/
520 UINT64
521 GetAccNum (
522 IN UINT64 *Entry
523 )
524 {
525 //
526 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
527 //
528 return BitFieldRead64 (*Entry, 9, 11);
529 }
530
531 /**
532 Return and update the access record in entry.
533
534 @param[in, out] Entry Pointer to entry
535
536 @return Access record value.
537
538 **/
539 UINT64
540 GetAndUpdateAccNum (
541 IN OUT UINT64 *Entry
542 )
543 {
544 UINT64 Acc;
545
546 Acc = GetAccNum (Entry);
547 if ((*Entry & IA32_PG_A) != 0) {
548 //
549 // If this entry has been accessed, clear access flag in Entry and update access record
550 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
551 //
552 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
553 SetAccNum (Entry, 0x7);
554 return (0x7 + ACC_MAX_BIT);
555 } else {
556 if (Acc != 0) {
557 //
558 // If the access record is not the smallest value 0, minus 1 and update the access record field
559 //
560 SetAccNum (Entry, Acc - 1);
561 }
562 }
563 return Acc;
564 }
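//
// A short trace of the aging scheme above (illustrative): an entry with the
// A flag set gets the flag cleared, its record reset to 7, and the call
// returns 7 + ACC_MAX_BIT (15), so it always ranks above entries that were
// not accessed. An un-accessed entry whose record is 3 returns 3 and is
// decremented to 2, so entries that stay idle age toward 0 and become the
// preferred reclaim victims in ReclaimPages().
//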
565
566 /**
567 Reclaim free pages for PageFault handler.
568
569 Search the whole entries tree to find the leaf entry that has the smallest
570 access record value. Insert the page pointed by this leaf entry into the
571 page pool. And check its upper entries if need to be inserted into the page
572 pool or not.
573
574 **/
575 VOID
576 ReclaimPages (
577 VOID
578 )
579 {
580 UINT64 Pml5Entry;
581 UINT64 *Pml5;
582 UINT64 *Pml4;
583 UINT64 *Pdpt;
584 UINT64 *Pdt;
585 UINTN Pml5Index;
586 UINTN Pml4Index;
587 UINTN PdptIndex;
588 UINTN PdtIndex;
589 UINTN MinPml5;
590 UINTN MinPml4;
591 UINTN MinPdpt;
592 UINTN MinPdt;
593 UINT64 MinAcc;
594 UINT64 Acc;
595 UINT64 SubEntriesNum;
596 BOOLEAN PML4EIgnore;
597 BOOLEAN PDPTEIgnore;
598 UINT64 *ReleasePageAddress;
599 IA32_CR4 Cr4;
600 BOOLEAN Enable5LevelPaging;
601 UINT64 PFAddress;
602 UINT64 PFAddressPml5Index;
603 UINT64 PFAddressPml4Index;
604 UINT64 PFAddressPdptIndex;
605 UINT64 PFAddressPdtIndex;
606
607 Pml4 = NULL;
608 Pdpt = NULL;
609 Pdt = NULL;
610 MinAcc = (UINT64)-1;
611 MinPml4 = (UINTN)-1;
612 MinPml5 = (UINTN)-1;
613 MinPdpt = (UINTN)-1;
614 MinPdt = (UINTN)-1;
615 Acc = 0;
616 ReleasePageAddress = 0;
617 PFAddress = AsmReadCr2 ();
618 PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
619 PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
620 PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
621 PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
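//
// Each paging level decodes 9 bits of the faulting linear address:
// bits 48..56 select the PML5 entry, 39..47 the PML4 entry, 30..38 the PDPT
// entry and 21..29 the PD entry. These indexes identify the branch that is
// currently faulting, so it is never chosen as the reclaim victim below.
//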
622
623 Cr4.UintN = AsmReadCr4 ();
624 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
625 Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
626
627 if (!Enable5LevelPaging) {
628 //
629 // Create one fake PML5 entry for 4-Level Paging
630 // so that the page table parsing logic only handles 5-Level page structure.
631 //
632 Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
633 Pml5 = &Pml5Entry;
634 }
635
636 //
637 // First, find the leaf entry that has the smallest access record value
638 //
639 for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
640 if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
641 //
642 // If the PML5 entry is not present or is masked, skip it
643 //
644 continue;
645 }
646 Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
647 for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
648 if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
649 //
650 // If the PML4 entry is not present or is masked, skip it
651 //
652 continue;
653 }
654 Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
655 PML4EIgnore = FALSE;
656 for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
657 if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
658 //
659 // If the PDPT entry is not present or is masked, skip it
660 //
661 if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
662 //
663 // If the PDPT entry is masked, we will ignore checking the PML4 entry
664 //
665 PML4EIgnore = TRUE;
666 }
667 continue;
668 }
669 if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
670 //
671 // It is not a 1-GByte page entry, so it references a Page Directory;
672 // do not consider the parent PML4 entry for reclaim any further
673 //
674 PML4EIgnore = TRUE;
675 Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
676 PDPTEIgnore = FALSE;
677 for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
678 if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
679 //
680 // If the PD entry is not present or is masked, skip it
681 //
682 if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
683 //
684 // If the PD entry is masked, do not consider the parent PDPT entry for reclaim
685 //
686 PDPTEIgnore = TRUE;
687 }
688 continue;
689 }
690 if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
691 //
692 // It is not a 2-MByte page entry, so it references a 4-KByte Page Table;
693 // look for the entry that has the smallest access record value
694 //
695 PDPTEIgnore = TRUE;
696 if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
697 Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
698 Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
699 if (Acc < MinAcc) {
700 //
701 // If the PD entry has the smallest access record value,
702 // save the Page address to be released
703 //
704 MinAcc = Acc;
705 MinPml5 = Pml5Index;
706 MinPml4 = Pml4Index;
707 MinPdpt = PdptIndex;
708 MinPdt = PdtIndex;
709 ReleasePageAddress = Pdt + PdtIndex;
710 }
711 }
712 }
713 }
714 if (!PDPTEIgnore) {
715 //
716 // If this PDPT entry references no PD entries that point to 4-KByte Page Tables,
717 // it only contains entries that map 2-MByte pages
718 //
719 if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
720 Pml5Index != PFAddressPml5Index) {
721 Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
722 if (Acc < MinAcc) {
723 //
724 // If the PDPT entry has the smallest access record value,
725 // save the Page address to be released
726 //
727 MinAcc = Acc;
728 MinPml5 = Pml5Index;
729 MinPml4 = Pml4Index;
730 MinPdpt = PdptIndex;
731 MinPdt = (UINTN)-1;
732 ReleasePageAddress = Pdpt + PdptIndex;
733 }
734 }
735 }
736 }
737 }
738 if (!PML4EIgnore) {
739 //
740 // If this PML4 entry references no PDPT entries that point to 2-MByte pages,
741 // it only contains entries that map 1-GByte pages
742 //
743 if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
744 Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
745 if (Acc < MinAcc) {
746 //
747 // If the PML4 entry has the smallest access record value,
748 // save the Page address to be released
749 //
750 MinAcc = Acc;
751 MinPml5 = Pml5Index;
752 MinPml4 = Pml4Index;
753 MinPdpt = (UINTN)-1;
754 MinPdt = (UINTN)-1;
755 ReleasePageAddress = Pml4 + Pml4Index;
756 }
757 }
758 }
759 }
760 }
761 //
762 // Make sure one PML4/PDPT/PD entry is selected
763 //
764 ASSERT (MinAcc != (UINT64)-1);
765
766 //
767 // Secondly, insert the page pointed by this entry into page pool and clear this entry
768 //
769 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
770 *ReleasePageAddress = 0;
771
772 //
773 // Lastly, check this entry's upper entries if need to be inserted into page pool
774 // or not
775 //
776 while (TRUE) {
777 if (MinPdt != (UINTN)-1) {
778 //
779 // If 4 KByte Page Table is released, check the PDPT entry
780 //
781 Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
782 Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
783 SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
784 if (SubEntriesNum == 0 &&
785 (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
786 //
787 // Release the empty Page Directory table if it no longer contains any 4-KByte Page Table entry,
788 // and clear the Page Directory Pointer entry
789 //
790 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
791 Pdpt[MinPdpt] = 0;
792 //
793 // Go on checking the PML4 table
794 //
795 MinPdt = (UINTN)-1;
796 continue;
797 }
798 //
799 // Update the sub-entries field in the PDPT entry and exit
800 //
801 SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
802 break;
803 }
804 if (MinPdpt != (UINTN)-1) {
805 //
806 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
807 //
808 SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
809 if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
810 //
811 // Release the empty Page Directory Pointer table if it no longer contains any entry,
812 // and clear the PML4 entry
813 //
814 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
815 Pml4[MinPml4] = 0;
816 MinPdpt = (UINTN)-1;
817 continue;
818 }
819 //
820 // Update the sub-entries field in the PML4 entry and exit
821 //
822 SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
823 break;
824 }
825 //
826 // The PML4-level entry has been released before; exit the loop
827 //
828 break;
829 }
830 }
831
832 /**
833 Allocate free Page for PageFault handler use.
834
835 @return Page address.
836
837 **/
838 UINT64
839 AllocPage (
840 VOID
841 )
842 {
843 UINT64 RetVal;
844
845 if (IsListEmpty (&mPagePool)) {
846 //
847 // If page pool is empty, reclaim the used pages and insert one into page pool
848 //
849 ReclaimPages ();
850 }
851
852 //
853 // Get one free page and remove it from page pool
854 //
855 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
856 RemoveEntryList (mPagePool.ForwardLink);
857 //
858 // Clean this page and return
859 //
860 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
861 return RetVal;
862 }
863
864 /**
865 Page Fault handler for SMM use.
866
867 **/
868 VOID
869 SmiDefaultPFHandler (
870 VOID
871 )
872 {
873 UINT64 *PageTable;
874 UINT64 *PageTableTop;
875 UINT64 PFAddress;
876 UINTN StartBit;
877 UINTN EndBit;
878 UINT64 PTIndex;
879 UINTN Index;
880 SMM_PAGE_SIZE_TYPE PageSize;
881 UINTN NumOfPages;
882 UINTN PageAttribute;
883 EFI_STATUS Status;
884 UINT64 *UpperEntry;
885 BOOLEAN Enable5LevelPaging;
886 IA32_CR4 Cr4;
887
888 //
889 // Set default SMM page attribute
890 //
891 PageSize = SmmPageSize2M;
892 NumOfPages = 1;
893 PageAttribute = 0;
894
895 EndBit = 0;
896 PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
897 PFAddress = AsmReadCr2 ();
898
899 Cr4.UintN = AsmReadCr4 ();
900 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
901
902 Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
903 //
904 // If the platform does not support page table attributes, set the default SMM page attribute
905 //
906 if (Status != EFI_SUCCESS) {
907 PageSize = SmmPageSize2M;
908 NumOfPages = 1;
909 PageAttribute = 0;
910 }
911 if (PageSize >= MaxSmmPageSizeType) {
912 PageSize = SmmPageSize2M;
913 }
914 if (NumOfPages > 512) {
915 NumOfPages = 512;
916 }
917
918 switch (PageSize) {
919 case SmmPageSize4K:
920 //
921 // BIT12 to BIT20 is Page Table index
922 //
923 EndBit = 12;
924 break;
925 case SmmPageSize2M:
926 //
927 // BIT21 to BIT29 is Page Directory index
928 //
929 EndBit = 21;
930 PageAttribute |= (UINTN)IA32_PG_PS;
931 break;
932 case SmmPageSize1G:
933 if (!m1GPageTableSupport) {
934 DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
935 ASSERT (FALSE);
936 }
937 //
938 // BIT30 to BIT38 is Page Directory Pointer Table index
939 //
940 EndBit = 30;
941 PageAttribute |= (UINTN)IA32_PG_PS;
942 break;
943 default:
944 ASSERT (FALSE);
945 }
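//
// For example, a 2-MByte mapping leaves EndBit == 21: the walk below creates
// any missing PML4/PDPT entries, stops one level above the 2-MByte leaf, and
// the new leaf entry maps PFAddress rounded down to a 2-MByte boundary
// (PFAddress & ~(SIZE_2MB - 1)), marked with IA32_PG_PS.
//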
946
947 //
948 // If execute-disable is enabled, set NX bit
949 //
950 if (mXdEnabled) {
951 PageAttribute |= IA32_PG_NX;
952 }
953
954 for (Index = 0; Index < NumOfPages; Index++) {
955 PageTable = PageTableTop;
956 UpperEntry = NULL;
957 for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
958 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
959 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
960 //
961 // If the entry is not present, allocate one page from page pool for it
962 //
963 PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
964 } else {
965 //
966 // Save the upper entry address
967 //
968 UpperEntry = PageTable + PTIndex;
969 }
970 //
971 // BIT9 to BIT11 of the entry are used to save the access record;
972 // the initial value is 7
973 //
974 PageTable[PTIndex] |= (UINT64)IA32_PG_A;
975 SetAccNum (PageTable + PTIndex, 7);
976 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
977 }
978
979 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
980 if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
981 //
982 // Check if the entry already exists; this issue may occur when page entries of
983 // different sizes are created under the same parent entry
984 //
985 DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
986 DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
987 ASSERT (FALSE);
988 }
989 //
990 // Fill the new entry
991 //
992 PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
993 PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
994 if (UpperEntry != NULL) {
995 SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
996 }
997 //
998 // Get the next page address if we need to create more page tables
999 //
1000 PFAddress += (1ull << EndBit);
1001 }
1002 }
1003
1004 /**
1005 The Page Fault handler wrapper for SMM use.
1006
1007 @param InterruptType Defines the type of interrupt or exception that
1008 occurred on the processor. This parameter is processor architecture specific.
1009 @param SystemContext A pointer to the processor context when
1010 the interrupt occurred on the processor.
1011 **/
1012 VOID
1013 EFIAPI
1014 SmiPFHandler (
1015 IN EFI_EXCEPTION_TYPE InterruptType,
1016 IN EFI_SYSTEM_CONTEXT SystemContext
1017 )
1018 {
1019 UINTN PFAddress;
1020 UINTN GuardPageAddress;
1021 UINTN ShadowStackGuardPageAddress;
1022 UINTN CpuIndex;
1023
1024 ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
1025
1026 AcquireSpinLock (mPFLock);
1027
1028 PFAddress = AsmReadCr2 ();
1029
1030 if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
1031 DumpCpuContext (InterruptType, SystemContext);
1032 DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
1033 CpuDeadLoop ();
1034 goto Exit;
1035 }
1036
1037 //
1038 // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack guard page,
1039 // or SMM page protection violation.
1040 //
1041 if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
1042 (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
1043 DumpCpuContext (InterruptType, SystemContext);
1044 CpuIndex = GetCpuIndex ();
1045 GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
1046 ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
1047 if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
1048 (PFAddress >= GuardPageAddress) &&
1049 (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
1050 DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
1051 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
1052 (mSmmShadowStackSize > 0) &&
1053 (PFAddress >= ShadowStackGuardPageAddress) &&
1054 (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
1055 DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
1056 } else {
1057 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
1058 DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
1059 DEBUG_CODE (
1060 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
1061 );
1062 } else {
1063 DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
1064 DEBUG_CODE (
1065 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1066 );
1067 }
1068
1069 if (HEAP_GUARD_NONSTOP_MODE) {
1070 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
1071 goto Exit;
1072 }
1073 }
1074 CpuDeadLoop ();
1075 goto Exit;
1076 }
1077
1078 //
1079 // If a page fault occurs in non-SMRAM range.
1080 //
1081 if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
1082 (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
1083 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
1084 DumpCpuContext (InterruptType, SystemContext);
1085 DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
1086 DEBUG_CODE (
1087 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
1088 );
1089 CpuDeadLoop ();
1090 goto Exit;
1091 }
1092
1093 //
1094 // If a NULL pointer was just accessed
1095 //
1096 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
1097 (PFAddress < EFI_PAGE_SIZE)) {
1098 DumpCpuContext (InterruptType, SystemContext);
1099 DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
1100 DEBUG_CODE (
1101 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1102 );
1103
1104 if (NULL_DETECTION_NONSTOP_MODE) {
1105 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
1106 goto Exit;
1107 }
1108
1109 CpuDeadLoop ();
1110 goto Exit;
1111 }
1112
1113 if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
1114 DumpCpuContext (InterruptType, SystemContext);
1115 DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
1116 DEBUG_CODE (
1117 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
1118 );
1119 CpuDeadLoop ();
1120 goto Exit;
1121 }
1122 }
1123
1124 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1125 SmmProfilePFHandler (
1126 SystemContext.SystemContextX64->Rip,
1127 SystemContext.SystemContextX64->ExceptionData
1128 );
1129 } else {
1130 SmiDefaultPFHandler ();
1131 }
1132
1133 Exit:
1134 ReleaseSpinLock (mPFLock);
1135 }
1136
1137 /**
1138 This function sets memory attribute for page table.
1139 **/
1140 VOID
1141 SetPageTableAttributes (
1142 VOID
1143 )
1144 {
1145 UINTN Index2;
1146 UINTN Index3;
1147 UINTN Index4;
1148 UINTN Index5;
1149 UINT64 *L1PageTable;
1150 UINT64 *L2PageTable;
1151 UINT64 *L3PageTable;
1152 UINT64 *L4PageTable;
1153 UINT64 *L5PageTable;
1154 UINTN PageTableBase;
1155 BOOLEAN IsSplitted;
1156 BOOLEAN PageTableSplitted;
1157 BOOLEAN CetEnabled;
1158 BOOLEAN Enable5LevelPaging;
1159
1160 //
1161 // Don't mark page table memory as read-only if
1162 // - no restriction on access to non-SMRAM memory; or
1163 // - SMM heap guard feature enabled; or
1164 // BIT2: SMM page guard enabled
1165 // BIT3: SMM pool guard enabled
1166 // - SMM profile feature enabled
1167 //
1168 if (!mCpuSmmRestrictedMemoryAccess ||
1169 ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
1170 FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1171 //
1172 // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
1173 //
1174 ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
1175 (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));
1176
1177 //
1178 // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
1179 //
1180 ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
1181 return ;
1182 }
1183
1184 DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
1185
1186 //
1187 // Disable write protection, because we need to mark the page table as read-only.
1188 // We need to *write* page table memory in order to mark it *read-only*.
1189 //
1190 CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
1191 if (CetEnabled) {
1192 //
1193 // CET must be disabled if WP is disabled.
1194 //
1195 DisableCet();
1196 }
1197 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
1198
1199 do {
1200 DEBUG ((DEBUG_INFO, "Start...\n"));
1201 PageTableSplitted = FALSE;
1202 L5PageTable = NULL;
1203
1204 GetPageTable (&PageTableBase, &Enable5LevelPaging);
1205
1206 if (Enable5LevelPaging) {
1207 L5PageTable = (UINT64 *)PageTableBase;
1208 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1209 PageTableSplitted = (PageTableSplitted || IsSplitted);
1210 }
1211
1212 for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
1213 if (Enable5LevelPaging) {
1214 L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1215 if (L4PageTable == NULL) {
1216 continue;
1217 }
1218 } else {
1219 L4PageTable = (UINT64 *)PageTableBase;
1220 }
1221 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1222 PageTableSplitted = (PageTableSplitted || IsSplitted);
1223
1224 for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
1225 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1226 if (L3PageTable == NULL) {
1227 continue;
1228 }
1229
1230 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1231 PageTableSplitted = (PageTableSplitted || IsSplitted);
1232
1233 for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
1234 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
1235 // 1G
1236 continue;
1237 }
1238 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1239 if (L2PageTable == NULL) {
1240 continue;
1241 }
1242
1243 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1244 PageTableSplitted = (PageTableSplitted || IsSplitted);
1245
1246 for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
1247 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
1248 // 2M
1249 continue;
1250 }
1251 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
1252 if (L1PageTable == NULL) {
1253 continue;
1254 }
1255 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
1256 PageTableSplitted = (PageTableSplitted || IsSplitted);
1257 }
1258 }
1259 }
1260 }
1261 } while (PageTableSplitted);
1262
1263 //
1264 // Enable write protection, after page table updated.
1265 //
1266 AsmWriteCr0 (AsmReadCr0() | CR0_WP);
1267 if (CetEnabled) {
1268 //
1269 // re-enable CET.
1270 //
1271 EnableCet();
1272 }
1273
1274 return ;
1275 }
1276
1277 /**
1278 This function reads CR2 register when on-demand paging is enabled.
1279
1280 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1281 **/
1282 VOID
1283 SaveCr2 (
1284 OUT UINTN *Cr2
1285 )
1286 {
1287 if (!mCpuSmmRestrictedMemoryAccess) {
1288 //
1289 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1290 //
1291 *Cr2 = AsmReadCr2 ();
1292 }
1293 }
1294
1295 /**
1296 This function restores CR2 register when on-demand paging is enabled.
1297
1298 @param[in] Cr2 Value to write into CR2 register.
1299 **/
1300 VOID
1301 RestoreCr2 (
1302 IN UINTN Cr2
1303 )
1304 {
1305 if (!mCpuSmmRestrictedMemoryAccess) {
1306 //
1307 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1308 //
1309 AsmWriteCr2 (Cr2);
1310 }
1311 }
1312
1313 /**
1314 Return whether access to non-SMRAM is restricted.
1315
1316 @retval TRUE Access to non-SMRAM is restricted.
1317 @retval FALSE Access to non-SMRAM is not restricted.
1318 **/
1319 BOOLEAN
1320 IsRestrictedMemoryAccess (
1321 VOID
1322 )
1323 {
1324 return mCpuSmmRestrictedMemoryAccess;
1325 }