/** @file
  Page Fault (#PF) handler for X64 processors

  Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

extern UINTN  mSmmShadowStackSize;

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}
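
//
// Note: CPUID leaf 0x80000001 EDX bit 26 is the architectural "1-GByte pages"
// (Page1GB) feature flag; the leaf 0x80000000 query above first confirms that
// the extended leaf exists before it is read.
//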

/**
  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16]
  is set) and the max physical address width is greater than 48 bits. Because 4-level
  paging can address physical addresses up to 2^48 - 1, there is no need to enable
  5-level paging when the max physical address width is 48 bits or less.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&
      (ExtFeatureEcx.Bits.FiveLevelPage == 1))
  {
    return TRUE;
  } else {
    return FALSE;
  }
}
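
//
// Note on the "4 * 9 + 12" threshold above: each paging level decodes 9 bits of
// the linear address and the page offset takes 12 bits, so 4-level paging covers
// 4 * 9 + 12 = 48 bits of linear address space. SMM identity-maps physical
// memory, so physical addresses above 2^48 - 1 are only reachable once a fifth
// level (5 * 9 + 12 = 57 bits) is enabled.
//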

/**
  Set the sub-entries number in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number, zero-based:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the sub-entries number in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number, zero-based:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
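
//
// Note: bits 52..62 of a present, non-leaf paging entry are ignored by the
// processor (bit 63 is XD), which is why the zero-based sub-entry count can be
// stashed in BIT52..BIT60 without affecting address translation. The count lets
// ReclaimPages () detect when a lower-level table has become empty and can be
// returned to the page pool.
//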

/**
  Calculate the maximum supported physical address bits.

  @return The maximum supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
}
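
//
// Note: when no CPU HOB is present, CPUID leaf 0x80000008 EAX[7:0] reports the
// physical address width directly (the (UINT8) cast keeps only those low 8
// bits); 36 bits is the conservative PAE-era fallback when the leaf is absent.
//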

/**
  Set static page table.

  @param[in] PageTable            Address of page table.
  @param[in] PhysicalAddressBits  The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
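
//
// Sizing example for SetStaticPageTable (): with PhysicalAddressBits == 52 and
// 5-level paging enabled, NumberOfPml5EntriesNeeded = 2^(52-48) = 16, and each
// PML5 entry then covers a full PML4 page (512 entries), each PML4 entry a full
// PDPT, so the whole 2^52-byte space is identity-mapped with 1-GByte (or
// 2-MByte) leaf pages. With 48 address bits and 4-level paging, a single PML4
// page with 2^(48-39) = 512 entries suffices.
//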

/**
  Create PageTable for SMM use.

  @return The address of the page table root (PML4 or PML5, to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update
    // the access record to the initial value 7; ACC_MAX_BIT is added to make the
    // returned value larger than that of any non-accessed entry
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, subtract 1 and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}
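
//
// Note on the aging scheme above: the 3-bit access record in BIT9..BIT11 acts
// as a small pseudo-LRU counter. An entry whose IA32_PG_A flag was set since
// the last scan is refreshed to 7 and reported as 7 + ACC_MAX_BIT (= 15), so it
// always outranks entries that were not accessed; every scan that finds the
// entry idle decrements the record toward 0. ReclaimPages () below evicts the
// entry whose reported value is smallest, i.e. the one idle for the most scans.
//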

/**
  Reclaim free pages for the PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper entries need to be inserted into the
  page pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, it should be a PDPT entry,
          // and we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, it should be a PD entry,
              // and we will look for the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to page directories,
        // it should only have entries pointing to 1-GByte pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if it has no more 4-KByte Page Table
        // entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty Page Directory Pointer table if it has no more entries,
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // The page pointed to by a PML4 entry has been released already; exit the loop
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
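
//
// Note: while a page sits in mPagePool, its first bytes are reused as the
// LIST_ENTRY links. That is why AllocPage () zeroes the page only after
// removing it from the pool, and why SmmInitPageTable () could thread the
// freshly allocated pages onto the list without any extra bookkeeping storage.
//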

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is the Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is the Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is the Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries
      // of different sizes are created under the same parent entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
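
//
// Walk example for SmiDefaultPFHandler (): for a 2-MByte mapping (EndBit == 21)
// under 4-level paging, StartBit iterates over 39 (PML4 index) and 30 (PDPT
// index), allocating any missing table on the way; the loop exits at
// StartBit == 21, where the leaf PD entry is written with IA32_PG_PS set. Each
// level consumes 9 bits of the faulting address, extracted with
// BitFieldRead64 (PFAddress, StartBit, StartBit + 8).
//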

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attribute for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  UINTN     PageTableBase;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  BOOLEAN   Enable5LevelPaging;
  IA32_CR4  Cr4;

  //
  // Don't mark page table memory as read-only if
  // - there is no restriction on access to non-SMRAM memory; or
  // - the SMM heap guard feature is enabled; or
  //     BIT2: SMM page guard enabled
  //     BIT3: SMM pool guard enabled
  // - the SMM profile feature is enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable))
  {
    //
    // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
    //
    ASSERT (
      !(mCpuSmmRestrictedMemoryAccess &&
        (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0)
      );

    //
    // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table read-only:
  // we must be able to *write* page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }

  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable       = NULL;

    PageTableBase      = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    Cr4.UintN          = AsmReadCr4 ();
    Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx (PageTableBase, Enable5LevelPaging, (EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof (UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }

      SmmSetMemoryAttributesEx (PageTableBase, Enable5LevelPaging, (EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx (PageTableBase, Enable5LevelPaging, (EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }

          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx (PageTableBase, Enable5LevelPaging, (EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }

            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }

            SmmSetMemoryAttributesEx (PageTableBase, Enable5LevelPaging, (EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }

  return;
}
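
//
// Note: the do/while loop above re-walks the page table until no call to
// SmmSetMemoryAttributesEx () reports a split. Marking a table page read-only
// can itself split a large page, which materializes new page-table pages that
// also have to be marked read-only, so the walk repeats until a full pass
// completes with PageTableSplitted == FALSE.
//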

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}
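
//
// Note: CR2 only needs to be preserved when on-demand paging is active, because
// only then can the SMI handler itself take page faults that overwrite the CR2
// value belonging to the interrupted (non-SMM) context. With restricted memory
// access the page table is static and SMM takes no expected page faults.
//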

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}