UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3
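
//
// The page-fault handler maintains a pool of PAGE_TABLE_PAGES free pages for
// building page-table entries on demand. ACC_MAX_BIT is added to the 3-bit
// access record (see GetAndUpdateAccNum below) so that a just-accessed entry
// always outranks entries that have merely been aged down.
//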

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}
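
//
// Note: CPUID leaf 80000001H EDX bit 26 checked above is the architectural
// Page1GB feature flag; it is only meaningful when the maximum extended
// function reported by CPUID leaf 80000000H covers leaf 80000001H, which is
// why the range check is required first.
//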

/**
  The routine returns TRUE when the CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
  the max physical address bits are bigger than 48. Because 4-level paging can address
  physical addresses up to 2^48 - 1, there is no need to enable 5-level paging
  when the max physical address bits are <= 48.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );
  DEBUG ((
    DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}
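
//
// The 48-bit threshold used above follows from the paging arithmetic: each
// of the 4 levels decodes 9 bits of the linear address and the page offset
// is 12 bits, so 4 * 9 + 12 = 48 addressable bits; a 5th level extends this
// to 5 * 9 + 12 = 57 bits.
//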

/**
  Get page table base address and the depth of the page table.

  @param[out] Base        Page table base address.
  @param[out] FiveLevels  TRUE means 5-level paging. FALSE means 4-level paging.
**/
VOID
GetPageTable (
  OUT UINTN    *Base,
  OUT BOOLEAN  *FiveLevels OPTIONAL
  )
{
  IA32_CR4  Cr4;

  if (mInternalCr3 == 0) {
    *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    if (FiveLevels != NULL) {
      Cr4.UintN   = AsmReadCr4 ();
      *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
    }
    return;
  }

  *Base = mInternalCr3;
  if (FiveLevels != NULL) {
    *FiveLevels = m5LevelPagingNeeded;
  }
}
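
//
// mInternalCr3 is non-zero only while the SMM page table is being manipulated
// through an internally tracked CR3 value rather than the processor's current
// CR3; in that case GetPageTable () reports the software-tracked paging depth
// (m5LevelPagingNeeded) instead of reading CR4.LA57.
//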

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
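
//
// A minimal usage sketch of the 0-based sub-entry counter (this is the
// pattern SmiDefaultPFHandler uses when it populates a new entry under an
// upper-level entry; the count wraps within its 9-bit field):
//
//   SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
//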

/**
  Calculate the maximum supported physical address bits.

  @return the maximum supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}
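
//
// CPUID leaf 80000008H reports the physical-address width in EAX[7:0]; the
// 36-bit fallback is a conventional default for older processors that lack
// this leaf.
//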

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
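
  //
  // Worked example: with mPhysicalAddressBits == 46 and 4-level paging,
  // NumberOfPml4EntriesNeeded = 1 << (46 - 39) = 128, and each PML4 entry
  // then covers a full 512-entry PDPT page, so the loops below populate
  // 128 * 512 PDPT entries of 1 GByte each (64 TByte in total).
  //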

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }
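
  //
  // Note: each LIST_ENTRY node above heads one whole free page; advancing
  // the pointer by EFI_PAGE_SIZE / sizeof (*FreePage) steps exactly one
  // 4-KByte page, so the pool links PAGE_TABLE_PAGES pages for AllocPage
  // to hand out later.
  //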

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access
    // record to the initial value 7; ACC_MAX_BIT is added to make the returned value larger
    // than that of any entry that was not accessed
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
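
//
// Aging example: an entry whose IA32_PG_A bit was set since the last scan is
// reset to record 7 and reported as 7 + ACC_MAX_BIT = 15, while untouched
// entries decay by one toward 0 on each scan; ReclaimPages below therefore
// evicts the page that has gone unused the longest.
//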

/**
  Reclaim free pages for the PageFault handler.

  Search the whole page-table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, then check whether its upper-level entries need to be inserted into
  the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a page directory;
          // we will not consider the PML4 entry any further
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not consider the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a 4-KByte page table;
              // we will look for the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty page directory table if there are no more 4-KByte page table entries;
        // clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page table or a page directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty PML4 table if there are no more 1-GByte page table entries;
        // clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }
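
  //
  // Linear-address decomposition used by the page-table walk below
  // (each level consumes 9 index bits):
  //   PML5: bits 48..56   PML4: bits 39..47   PDPT: bits 30..38
  //   PD:   bits 21..29   PT:   bits 12..20   page offset: bits 0..11
  //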

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries
      // of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINTN    Index5;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  UINT64   *L5PageTable;
  UINTN    PageTableBase;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  CetEnabled;
  BOOLEAN  Enable5LevelPaging;

  //
  // Don't mark page table memory as read-only if
  //  - no restriction on access to non-SMRAM memory; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself read-only:
  // we must be able to *write* page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable       = NULL;

    GetPageTable (&PageTableBase, &Enable5LevelPaging);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}
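
//
// Rationale: with on-demand paging the SMI handler itself may take page
// faults, and every page fault overwrites CR2. Saving and restoring CR2
// around SMI processing keeps the interrupted context's CR2 intact, e.g.
// when the SMI arrived while the OS was handling its own page fault.
//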

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}