/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

extern UINTN  mSmmShadowStackSize;

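//
// Free-page pool consumed on demand by the page fault handler
// (see AllocPage()/ReclaimPages() below).
//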
LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

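  //
  // CPUID leaf 80000001h EDX bit 26 is the Page1GB feature flag; query the
  // maximum extended function number first to make sure the leaf exists.
  //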
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16]
  is set) and the max physical address bits is bigger than 48. Because 4-level paging
  can only address physical addresses up to 2^48 - 1, there is no need to enable
  5-level paging when the max physical address bits is <= 48.

  @retval TRUE  5-level paging enabling is needed.
  @retval FALSE 5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

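  //
  // 4-level paging covers 4 * 9 + 12 = 48 linear-address bits, so 5-level
  // paging only pays off when more than 48 physical address bits are reported.
  //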
  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Get page table base address and the depth of the page table.

  @param[out] Base        Page table base address.
  @param[out] FiveLevels  TRUE means 5 level paging. FALSE means 4 level paging.
**/
VOID
GetPageTable (
  OUT UINTN    *Base,
  OUT BOOLEAN  *FiveLevels OPTIONAL
  )
{
  IA32_CR4  Cr4;

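  //
  // A non-zero mInternalCr3 identifies page tables that this driver is
  // manipulating before they are loaded into CR3; zero means the page
  // tables currently active in CR3 should be used.
  //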
  if (mInternalCr3 == 0) {
    *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    if (FiveLevels != NULL) {
      Cr4.UintN   = AsmReadCr4 ();
      *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
    }

    return;
  }

  *Base = mInternalCr3;
  if (FiveLevels != NULL) {
    *FiveLevels = m5LevelPagingNeeded;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum physical address bits supported.

  @return The maximum physical address bits supported.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
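    //
    // No CPU HOB: read CPUID leaf 80000008h, whose EAX[7:0] reports the
    // physical address bits, defaulting to 36 bits if the leaf is absent.
    //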
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable            Address of page table.
  @param[in] PhysicalAddressBits  The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

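  //
  // A PML5 entry spans 2^48 bytes, a PML4 entry 2^39 bytes and a PDPT entry
  // 2^30 bytes, so each count below is 2^(PhysicalAddressBits - span bits),
  // with PhysicalAddressBits clamped level by level on the way down.
  //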
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
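  // so that the page-pool reclaim logic (ReclaimPages) never releases the
  // page tables mapping the first 4GB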
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create a page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to the page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
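  // (The root is expected to reside below 4GB, since page-table memory is
  // allocated from SMRAM, so the address fits in the UINT32 return value.)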
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update
    // the access record to the initial value 7; ACC_MAX_BIT is added to make the
    // returned value larger than that of any entry that has not been accessed
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}

/**
  Reclaim free pages for the PageFault handler.

  Search the whole page-table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted into
  the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);
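  //
  // Note which entries map the faulting address; the search below never
  // reclaims them, since the fault handler is about to populate that path.
  //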

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // the PML4 entry will not be checked any further
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it is a PD entry pointing to a
              // 4-KByte page table; look for the entry with the smallest access
              // record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
            // it should only have entries that map 2-MByte pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to Page Directories,
        // it should only have entries that map 1-GByte pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if it contains no more 4-KByte
        // Page Table entries, and clear the Page Directory Pointer entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty Page Directory Pointer table if it contains no more
        // entries, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // A PML4 entry was released above; there is no upper level to check, so exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attributes
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is the Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is the Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is the Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
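    //
    // Walk from the top paging level down to the level that maps the faulting
    // address; each level consumes 9 bits of the linear address, starting at
    // bit 48 (PML5) or bit 39 (PML4).
    //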
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of the entry is used to save the access record,
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries
      // of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINTN    Index5;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  UINT64   *L5PageTable;
  UINTN    PageTableBase;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  CetEnabled;
  BOOLEAN  Enable5LevelPaging;

  //
  // Don't mark the page table memory as read-only if
  //  - access to non-SMRAM memory is not restricted; or
  //  - the SMM heap guard feature is enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - the SMM profile feature is enabled.
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable))
  {
    //
    // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
    //
    ASSERT (
      !(mCpuSmmRestrictedMemoryAccess &&
        (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0)
      );

    //
    // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page tables read-only:
  // we must be able to *write* page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }

  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

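  //
  // Marking the page-table pages read-only may itself split large pages into
  // smaller ones, which allocates new page-table pages; repeat until a full
  // pass completes without any split.
  //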
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable       = NULL;

    GetPageTable (&PageTableBase, &Enable5LevelPaging);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof (UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }

          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }

            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }

            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page tables are updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to the variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}