/*
  Source: UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c (edk2 mirror,
  commit d60c404a3df5e120e46ce066b442638a4ed222ed).
*/
1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 #define PAGE_TABLE_PAGES 8
14 #define ACC_MAX_BIT BIT3
15
16 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
17 BOOLEAN m1GPageTableSupport = FALSE;
18 BOOLEAN mCpuSmmStaticPageTable;
19 BOOLEAN m5LevelPagingSupport;
20 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;
21
22 /**
23 Disable CET.
24 **/
25 VOID
26 EFIAPI
27 DisableCet (
28 VOID
29 );
30
31 /**
32 Enable CET.
33 **/
34 VOID
35 EFIAPI
36 EnableCet (
37 VOID
38 );
39
40 /**
41 Check if 1-GByte pages is supported by processor or not.
42
43 @retval TRUE 1-GByte pages is supported.
44 @retval FALSE 1-GByte pages is not supported.
45
46 **/
47 BOOLEAN
48 Is1GPageSupport (
49 VOID
50 )
51 {
52 UINT32 RegEax;
53 UINT32 RegEdx;
54
55 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
56 if (RegEax >= 0x80000001) {
57 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
58 if ((RegEdx & BIT26) != 0) {
59 return TRUE;
60 }
61 }
62 return FALSE;
63 }
64
65 /**
66 Check if 5-level paging is supported by processor or not.
67
68 @retval TRUE 5-level paging is supported.
69 @retval FALSE 5-level paging is not supported.
70
71 **/
72 BOOLEAN
73 Is5LevelPagingSupport (
74 VOID
75 )
76 {
77 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
78
79 AsmCpuidEx (
80 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
81 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
82 NULL,
83 NULL,
84 &EcxFlags.Uint32,
85 NULL
86 );
87 return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
88 }
89
90 /**
91 Set sub-entries number in entry.
92
93 @param[in, out] Entry Pointer to entry
94 @param[in] SubEntryNum Sub-entries number based on 0:
95 0 means there is 1 sub-entry under this entry
96 0x1ff means there is 512 sub-entries under this entry
97
98 **/
99 VOID
100 SetSubEntriesNum (
101 IN OUT UINT64 *Entry,
102 IN UINT64 SubEntryNum
103 )
104 {
105 //
106 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
107 //
108 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
109 }
110
111 /**
112 Return sub-entries number in entry.
113
114 @param[in] Entry Pointer to entry
115
116 @return Sub-entries number based on 0:
117 0 means there is 1 sub-entry under this entry
118 0x1ff means there is 512 sub-entries under this entry
119 **/
120 UINT64
121 GetSubEntriesNum (
122 IN UINT64 *Entry
123 )
124 {
125 //
126 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
127 //
128 return BitFieldRead64 (*Entry, 52, 60);
129 }
130
131 /**
132 Calculate the maximum support address.
133
134 @return the maximum support address.
135 **/
136 UINT8
137 CalculateMaximumSupportAddress (
138 VOID
139 )
140 {
141 UINT32 RegEax;
142 UINT8 PhysicalAddressBits;
143 VOID *Hob;
144
145 //
146 // Get physical address bits supported.
147 //
148 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
149 if (Hob != NULL) {
150 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
151 } else {
152 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
153 if (RegEax >= 0x80000008) {
154 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
155 PhysicalAddressBits = (UINT8) RegEax;
156 } else {
157 PhysicalAddressBits = 36;
158 }
159 }
160 return PhysicalAddressBits;
161 }
162
/**
  Build the static identity-mapped SMM page table entries that cover the
  physical address space above 4GB.

  The first 4GB is mapped elsewhere (see SmmInitPageTable); this routine
  extends the map up to mPhysicalAddressBits, using 1-GByte pages when the
  processor supports them and 2-MByte pages otherwise. Existing non-NULL
  intermediate tables are reused; missing ones are allocated and zeroed.

  @param[in] PageTable     Address of the top-level page table (PML4, or
                           PML5 when 5-level paging is in use).
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                            PageAddress;
  UINTN                             NumberOfPml5EntriesNeeded;
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml5Entries;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel5Entry;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageMap;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled, so clamp the width accordingly.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  //
  // Each paging level translates 9 address bits; compute the entry count
  // needed at each level, reducing the remaining width as we descend.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the outer loop runs exactly once.
    //
    if (m5LevelPagingSupport) {
      //
      // Reuse the PML4 page the entry already points to; allocate one only
      // if the entry is still empty.
      //
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries (already mapped by Gen4GPageTable)
            //
            continue;
          }
          //
          // Fill in the Page Directory entries (1-GByte leaf pages)
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries (already mapped by Gen4GPageTable)
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in a Page Directory Pointer Entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries (2-MByte leaf pages)
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
310
/**
  Create the page table used while in SMM and install the SMM page-fault
  handler.

  Detects 1-GByte page and 5-level paging support, builds a PML4 (and PML5
  when supported) over the first 4GB, then either completes a full static
  identity map (PcdCpuSmmStaticPageTable == TRUE) or seeds a small page pool
  so further mappings can be created on demand by the page-fault handler.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;
  UINT64                            *Pml4Entry;
  UINT64                            *Pml5Entry;

  //
  // Initialize spin lock used to serialize page-fault handling
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  m5LevelPagingSupport = Is5LevelPagingSupport ();
  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  //
  // Patch the SMI entry code so it enables LA57 only when supported.
  //
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry, so ReclaimPages never evicts
  // the permanently mapped low-4GB PDPT entries.
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number (zero based: 3 == four populated PDPT entries)
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingSupport) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number (zero based: 1 == two sub-entries)
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool for on-demand mapping in SmiDefaultPFHandler
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3).
  // NOTE(review): the UINT32 return truncates the pointer — presumably the
  // page table memory is allocated below 4GB; confirm against
  // AllocatePageTableMemory's allocation policy.
  //
  return (UINT32)(UINTN)PTEntry;
}
438
439 /**
440 Set access record in entry.
441
442 @param[in, out] Entry Pointer to entry
443 @param[in] Acc Access record value
444
445 **/
446 VOID
447 SetAccNum (
448 IN OUT UINT64 *Entry,
449 IN UINT64 Acc
450 )
451 {
452 //
453 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
454 //
455 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
456 }
457
458 /**
459 Return access record in entry.
460
461 @param[in] Entry Pointer to entry
462
463 @return Access record value.
464
465 **/
466 UINT64
467 GetAccNum (
468 IN UINT64 *Entry
469 )
470 {
471 //
472 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
473 //
474 return BitFieldRead64 (*Entry, 9, 11);
475 }
476
477 /**
478 Return and update the access record in entry.
479
480 @param[in, out] Entry Pointer to entry
481
482 @return Access record value.
483
484 **/
485 UINT64
486 GetAndUpdateAccNum (
487 IN OUT UINT64 *Entry
488 )
489 {
490 UINT64 Acc;
491
492 Acc = GetAccNum (Entry);
493 if ((*Entry & IA32_PG_A) != 0) {
494 //
495 // If this entry has been accessed, clear access flag in Entry and update access record
496 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
497 //
498 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
499 SetAccNum (Entry, 0x7);
500 return (0x7 + ACC_MAX_BIT);
501 } else {
502 if (Acc != 0) {
503 //
504 // If the access record is not the smallest value 0, minus 1 and update the access record field
505 //
506 SetAccNum (Entry, Acc - 1);
507 }
508 }
509 return Acc;
510 }
511
/**
  Reclaim free pages for the PageFault handler.

  Walks the whole paging hierarchy (treating 4-level paging as a one-entry
  fake PML5 so a single traversal handles both modes), finds the non-leaf
  entry with the smallest access record value, returns its page to the page
  pool, and then releases any upper-level tables that became empty as a
  result. Entries covering the current faulting address and entries marked
  with IA32_PG_PMNT are never reclaimed.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       Pml5Entry;
  UINT64                       *Pml5;
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml5Index;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml5;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;
  IA32_CR4                     Cr4;
  BOOLEAN                      Enable5LevelPaging;
  UINT64                       PFAddress;
  UINT64                       PFAddressPml5Index;
  UINT64                       PFAddressPml4Index;
  UINT64                       PFAddressPdptIndex;
  UINT64                       PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;
  //
  // Pre-compute the table indexes of the faulting address so the entries
  // currently being mapped are excluded from eviction.
  //
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // we will not consider evicting the owning PML4 entry any more.
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not consider the owning
                // PDPT entry for eviction any more.
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a Page Table;
              // track the entry with the smallest access record value.
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
            // it should only have entries pointing to 2 MByte Pages, so the
            // whole Page Directory is itself a reclaim candidate.
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
        // it should only have entries pointing to 1 GByte Pages, so the
        // whole PDPT page is itself a reclaim candidate.
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4 KByte Page Table was released, check the owning PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // and clear the Page Directory Pointer entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2MB Page Table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty PDPT table if there was no more 1-GByte Page Table entry
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table itself was released earlier; nothing above it to check.
    //
    break;
  }
}
777
778 /**
779 Allocate free Page for PageFault handler use.
780
781 @return Page address.
782
783 **/
784 UINT64
785 AllocPage (
786 VOID
787 )
788 {
789 UINT64 RetVal;
790
791 if (IsListEmpty (&mPagePool)) {
792 //
793 // If page pool is empty, reclaim the used pages and insert one into page pool
794 //
795 ReclaimPages ();
796 }
797
798 //
799 // Get one free page and remove it from page pool
800 //
801 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
802 RemoveEntryList (mPagePool.ForwardLink);
803 //
804 // Clean this page and return
805 //
806 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
807 return RetVal;
808 }
809
/**
  Default Page Fault handler for SMM use.

  Creates on-demand page-table mappings for the faulting address read from
  CR2. The platform may override page size (4K/2M/1G), page count, and
  attributes via GetPlatformPageTableAttribute; otherwise one 2 MByte page
  is mapped. Intermediate table pages are drawn from the page pool via
  AllocPage, and sub-entry counts/access records are maintained for the
  reclaim logic.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                       *PageTable;
  UINT64                       *PageTableTop;
  UINT64                       PFAddress;
  UINTN                        StartBit;
  UINTN                        EndBit;
  UINT64                       PTIndex;
  UINTN                        Index;
  SMM_PAGE_SIZE_TYPE           PageSize;
  UINTN                        NumOfPages;
  UINTN                        PageAttribute;
  EFI_STATUS                   Status;
  UINT64                       *UpperEntry;
  BOOLEAN                      Enable5LevelPaging;
  IA32_CR4                     Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform does not support page table attributes, fall back to the
  // default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Sanitize the platform-provided values: clamp page size to a valid type
  // and cap the page count at one full table (512 entries).
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    //
    // Walk the hierarchy from the top level (PML5 when LA57 is on) down to
    // the level that holds the leaf for the chosen page size.
    //
    PageTable = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry: the leaf maps the fault address rounded down to
    // the page-size boundary (the low EndBit bits are cleared).
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
949
/**
  The Page Fault handler wrapper for SMM use.

  Classifies the fault read from CR2: addresses beyond the supported range,
  SMM stack-guard hits, SMM page-protection violations, execution or NULL
  access outside SMRAM, and forbidden communication-buffer accesses all dead
  loop (or continue via GuardPagePFHandler in non-stop modes). Remaining
  faults are forwarded to the SMM Profile handler or to the default
  on-demand mapping handler.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is
                          processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Serialize page-fault handling across CPUs.
  //
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table there is no on-demand mapping, so an address
  // beyond the mapped range can never be satisfied.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    //
    // The guard page is the first page of this CPU's SMM stack region.
    //
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID set means this was an instruction fetch.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  //
  // Any remaining fault is legitimate: record it (profile mode) or create
  // the missing mapping on demand.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
1075
/**
  This function sets memory attribute for page table.

  Marks every level of the SMM page table hierarchy read-only. Because
  marking a region RO may itself split large pages (creating new table
  pages), the walk repeats until a full pass completes with no splits.
  Write protection (and CET, which requires WP) is temporarily disabled so
  the page-table memory can be modified while being marked RO.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINTN                 Index5;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  UINT64                *L5PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;
  BOOLEAN               CetEnabled;
  IA32_CR4              Cr4;
  BOOLEAN               Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G leaf page: no lower-level table to mark
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M leaf page: no lower-level table to mark
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet();
  }

  return ;
}
1215
1216 /**
1217 This function reads CR2 register when on-demand paging is enabled.
1218
1219 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1220 **/
1221 VOID
1222 SaveCr2 (
1223 OUT UINTN *Cr2
1224 )
1225 {
1226 if (!mCpuSmmStaticPageTable) {
1227 *Cr2 = AsmReadCr2 ();
1228 }
1229 }
1230
1231 /**
1232 This function restores CR2 register when on-demand paging is enabled.
1233
1234 @param[in] Cr2 Value to write into CR2 register.
1235 **/
1236 VOID
1237 RestoreCr2 (
1238 IN UINTN Cr2
1239 )
1240 {
1241 if (!mCpuSmmStaticPageTable) {
1242 AsmWriteCr2 (Cr2);
1243 }
1244 }