/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3

LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmRestrictedMemoryAccess;
BOOLEAN                             m5LevelPagingSupport;
X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}
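
//
// Note: CPUID leaf 80000000H returns the highest supported extended leaf in
// EAX, which is why leaf 80000001H is only queried when it is reported as
// available. EDX bit 26 of leaf 80000001H is the architectural "Page1GB"
// feature flag, which is what the BIT26 test above checks.
//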

/**
  Check if 5-level paging is supported by the processor or not.

  @retval TRUE   5-level paging is supported.
  @retval FALSE  5-level paging is not supported.

**/
BOOLEAN
Is5LevelPagingSupport (
  VOID
  )
{
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &EcxFlags.Uint32,
    NULL
    );
  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
}

/**
  Set the number of sub-entries in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entry number, zero-based:
                               0 means there is 1 sub-entry under this entry;
                               0x1ff means there are 512 sub-entries under this entry.

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the number of sub-entries in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entry number, zero-based:
          0 means there is 1 sub-entry under this entry;
          0x1ff means there are 512 sub-entries under this entry.
**/
UINT64
GetSubEntriesNum (
  IN UINT64                   *Entry
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
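
//
// Illustrative sketch (not part of the driver): the sub-entry counter
// round-trips through bits 52..60 without disturbing the rest of the entry.
// The entry value below is hypothetical.
//
//   UINT64  Entry;
//
//   Entry = 0x000000003FC00023ULL;     // hypothetical page-table entry
//   SetSubEntriesNum (&Entry, 0x1FF);  // record 512 sub-entries
//   ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
//   ASSERT ((Entry & 0xFFFFFFFFFFFFFULL) == 0x000000003FC00023ULL); // low 52 bits intact
//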

/**
  Calculate the maximum supported address.

  @return The maximum supported address, as a number of physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get the number of physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}
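
//
// Worked example (illustrative only): on a processor reporting
// CPUID.80000008H:EAX[7:0] = 39, the routine returns 39, i.e. a maximum
// physical address of 2^39 - 1 = 0x7F_FFFF_FFFF (a 512-GByte space). When
// neither a CPU HOB nor leaf 80000008H is available, the legacy 36-bit
// (64-GByte) limit is assumed.
//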

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingSupport) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
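
//
// Sizing sketch (illustrative only): with mPhysicalAddressBits == 48 and
// 1-GByte pages supported, the loops above need
//   NumberOfPml4EntriesNeeded = 2^(48-39) = 512
// PML4 entries, each pointing at one PDPT page whose 512 entries are
// 1-GByte leaves: 1 PML4 page + 512 PDPT pages, roughly 2 MB of page-table
// memory. Without 1-GByte page support every PDPT entry also needs its own
// Page Directory page, adding up to 512 * 512 more pages for a full map.
//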

/**
  Create PageTable for SMM use.

  @return The address of the page table root (PML4 or PML5, to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingSupport          = Is5LevelPagingSupport ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingSupport) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create a page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to the page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
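
//
// Note (a sketch of the rationale, not a guarantee made by this file): the
// page-table root is returned as a UINT32. Page-table pages are allocated
// from SMRAM, which resides below 4GB, so the truncation is expected to be
// lossless; the 32-bit value is what ultimately gets programmed into CR3 on
// SMI entry.
//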

/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access
    // record to the initial value 7; ACC_MAX_BIT is added to make the result larger than any
    // stored record.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, subtract 1 and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
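
//
// Aging sketch (illustrative only): an entry whose page was touched since
// the last scan has IA32_PG_A set, so GetAndUpdateAccNum() resets its record
// to 7 and reports 7 + ACC_MAX_BIT = 15, keeping it away from victim
// selection. On each later scan without an access the record decays
// 7, 6, 5, ... 0, so the least-recently-used leaf ends up with the smallest
// value and is the one ReclaimPages() releases.
//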

/**
  Reclaim free pages for the PageFault handler.

  Search the whole page-table tree to find the leaf entry that has the
  smallest access record value. Insert the page pointed to by this leaf
  entry into the page pool, then check whether its upper-level entries
  also need to be inserted into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64   Pml5Entry;
  UINT64   *Pml5;
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pdt;
  UINTN    Pml5Index;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdtIndex;
  UINTN    MinPml5;
  UINTN    MinPml4;
  UINTN    MinPdpt;
  UINTN    MinPdt;
  UINT64   MinAcc;
  UINT64   Acc;
  UINT64   SubEntriesNum;
  BOOLEAN  PML4EIgnore;
  BOOLEAN  PDPTEIgnore;
  UINT64   *ReleasePageAddress;
  IA32_CR4 Cr4;
  BOOLEAN  Enable5LevelPaging;
  UINT64   PFAddress;
  UINT64   PFAddressPml5Index;
  UINT64   PFAddressPml4Index;
  UINT64   PFAddressPdptIndex;
  UINT64   PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // we will not consider the PML4 entry any further.
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not consider the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it is a PD entry pointing to a Page Table;
              // we will look for the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be
  // inserted into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory if there are no more 4-KByte Page Table entries
        // under it, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory Pointer Table if there are no more 1-GByte
        // page entries under it, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
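
//
// Note: the pool tracks free pages by threading a LIST_ENTRY through the
// first bytes of each 4-KByte page, so the list node address *is* the page
// address. ReclaimPages() always returns at least one page to the pool (it
// asserts that a victim entry was found), so the dequeue above never runs
// on an empty list.
//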

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record,
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
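
//
// Worked example (illustrative only): for a fault at PFAddress 0x12345678
// with the default 2-MByte mapping, EndBit is 21 and the new leaf entry
// covers PFAddress & ~(SIZE_2MB - 1) = 0x12200000 through 0x123FFFFF. The
// walk above descends from PML4 (or PML5 when LA57 is set) down to the Page
// Directory, allocating any missing intermediate pages from the pool on
// the way.
//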

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be the SMM stack guard page
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access to SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't mark page table memory as read-only if
  // - there is no restriction on access to non-SMRAM memory; or
  // - the SMM heap guard feature is enabled:
  //     BIT2: SMM page guard enabled
  //     BIT3: SMM pool guard enabled
  // - the SMM profile feature is enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restricted access to non-SMRAM memory and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restricted access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table read-only:
  // we must be able to *write* the page table memory in order to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

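  //
  // Each pass below marks every live page-table page read-only. Doing so may
  // require SmmSetMemoryAttributesEx() to split a large page, which
  // allocates fresh page-table pages that are not yet read-only; the
  // do/while therefore repeats the whole walk until a pass completes with
  // no further splits (PageTableSplitted stays FALSE).
  //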
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}
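
//
// Usage sketch (illustrative only; the caller shown is a hypothetical stand-in
// for the SMI dispatch path, which must preserve the interrupted context's CR2
// because SmiPFHandler/SmiDefaultPFHandler overwrite it while serving
// on-demand page faults):
//
//   UINTN  Cr2;
//
//   SaveCr2 (&Cr2);      // capture CR2 before SMI processing may fault
//   ...                  // dispatch SMI handlers; a #PF may rewrite CR2
//   RestoreCr2 (Cr2);    // put the saved value back on the way out
//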