/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3
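//
// PAGE_TABLE_PAGES is the number of 4-KByte pages pre-allocated into
// mPagePool for on-demand page table creation in SmmInitPageTable().
// ACC_MAX_BIT (BIT3) is added to the 3-bit access record of a just-accessed
// entry in GetAndUpdateAccNum() so that it compares larger than any stored
// record value (at most 7) when ReclaimPages() searches for the entry with
// the smallest access record.
//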

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE  1-GByte pages are supported.
  @retval FALSE 1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
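    //
    // CPUID.80000001H:EDX.Page1GB[bit 26] reports 1-GByte page support.
    //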
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16]
  is set) and the max physical address width is bigger than 48. Because 4-level paging
  can address physical addresses up to 2^48 - 1, there is no need to enable 5-level
  paging when the max physical address width is <= 48.

  @retval TRUE  5-level paging enabling is needed.
  @retval FALSE 5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );
  DEBUG ((
    DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
    ));

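  //
  // 4-level paging resolves 4 levels x 9 bits each plus the 12-bit page offset,
  // i.e. 48 bits of linear address, hence the 4 * 9 + 12 limit below.
  //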
  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported physical address width.

  @return The maximum supported physical address width, in bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

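  //
  // Each paging level resolves 9 bits of linear address. Work out how many
  // entries are needed at each top level to cover 2^mPhysicalAddressBits of
  // address space, clamping mPhysicalAddressBits to the span of the next
  // lower level as we go.
  //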
  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
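  // (ReclaimPages() never releases entries that have IA32_PG_PMNT set, so
  // the mapping of the first 4GB stays permanently in place)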
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
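  // (the count is zero-based: 3 means the four PDPT entries covering the
  // first 4GB that Gen4GPageTable() just created)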
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
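    //
    // Chain each of the PAGE_TABLE_PAGES 4-KByte pages into mPagePool; the
    // LIST_ENTRY node lives at the start of each free page, so stepping by
    // EFI_PAGE_SIZE / sizeof (*FreePage) advances exactly one page.
    //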
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; ACC_MAX_BIT is added to the
    // returned value to make it larger than any stored record.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrease it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

/**
  Reclaim free pages for PageFault handler.

  Search the whole page table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted into
  the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
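  //
  // Extract the 9-bit table index at each paging level from the faulting
  // address; the entries that translate the faulting address itself must not
  // be reclaimed.
  //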
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a 4-KByte page table;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
            // it only has entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to Page Directories,
        // it only has entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there is no more 4-KByte Page Table entry,
        // and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte Page Table is released or a Page Directory table is released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty PML4 table if there is no more 1-GByte Page Table entry,
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
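    //
    // Walk from the top paging level down to the level selected by EndBit;
    // each step consumes 9 bits of the faulting address and descends one level,
    // allocating any missing intermediate page table from the page pool.
    //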
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be an SMM stack guard
  // page violation or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't mark page table memory as read-only if
  // - there is no restriction on access to non-SMRAM memory; or
  // - the SMM heap guard feature is enabled; or
  //   BIT2: SMM page guard enabled
  //   BIT3: SMM pool guard enabled
  // - the SMM profile feature is enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself as read-only.
  // We must be able to *write* page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

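  //
  // Marking a page-table page read-only may itself split a large page, which
  // allocates new page-table pages that also need to be marked read-only.
  // Repeat the walk until a full pass performs no further splits.
  //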
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE  Access to non-SMRAM is restricted.
  @retval FALSE Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}