/** @file
  Page Fault (#PF) handler for X64 processors

  Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

extern UINTN  mSmmShadowStackSize;

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging
  (CPUID[7,0].ECX.BIT[16] is set) and the maximum physical address width is
  greater than 48 bits. Because 4-level paging can address physical addresses
  up to 2^48 - 1, there is no need to enable 5-level paging when the maximum
  physical address width is <= 48.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&
      (ExtFeatureEcx.Bits.FiveLevelPage == 1))
  {
    return TRUE;
  } else {
    return FALSE;
  }
}
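
//
// Illustrative note (not part of the original source): 4 * 9 + 12 = 48 is the
// linear-address width of 4-level paging. Assuming, for example, a CPU that
// reports 46 physical address bits, the check above returns FALSE even when
// CPUID advertises 5-level paging, since 4-level paging already covers the
// whole physical address space; a CPU reporting 52 bits with the 5-level
// paging feature bit set returns TRUE.
//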

/**
  Set the sub-entries number in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}
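
//
// Illustrative sketch (not part of the original source): the sub-entry count
// lives in the ignored bits 52..60 of a paging entry, so it travels with the
// entry itself. For example, assuming Entry == 0x000000001234A023:
//
//   SetSubEntriesNum (&Entry, 0x1FF);
//   // Entry is now 0x1FF000001234A023; address and attribute bits unchanged.
//   ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
//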

/**
  Return the sub-entries number in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported physical address.

  @return the maximum supported physical address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
}
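
//
// Illustrative note (not part of the original source): CPUID leaf 0x80000008
// returns the physical address width in EAX[7:0]. For example, an EAX value
// of 0x3027 yields PhysicalAddressBits = 0x27 = 39 after the UINT8 cast.
// When neither the CPU HOB nor that leaf is available, the code above falls
// back to 36 bits, the architectural minimum.
//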

/**
  Set static page table.

  @param[in] PageTable            Address of page table.
  @param[in] PhysicalAddressBits  The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
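
//
// Illustrative sizing sketch (not part of the original source): for a CPU
// reporting 46 physical address bits without 5-level paging, the logic above
// computes NumberOfPml5EntriesNeeded = 1, NumberOfPml4EntriesNeeded =
// 2^(46-39) = 128 and NumberOfPdpEntriesNeeded = 2^(39-30) = 512, i.e. 128
// PML4 entries, each pointing at a full PDPT page of 512 1-GByte mappings
// when m1GPageTableSupport is TRUE.
//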

/**
  Create PageTable for SMM use.

  @return The address of the top-level page table, PML4 or PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create a page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the
    // access record to the initial value 7; adding ACC_MAX_BIT makes it larger than the others
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, subtract 1 and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}
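
//
// Illustrative aging example (not part of the original source): for an entry
// whose IA32_PG_A bit is set, the call clears the bit, resets the record to 7
// and returns 7 + ACC_MAX_BIT = 15, ranking it above any entry that was not
// accessed in this round; for an untouched entry with record 3 it stores 2
// and returns 3. Repeated sweeps therefore age unused entries down toward 0,
// making them the first candidates for ReclaimPages ().
//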

/**
  Reclaim free pages for the PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper entries need to be inserted into the
  page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles the 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it should be a PDPT entry pointing to a
          // Page Directory; we will not consider the PML4 entry any further
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not consider the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it should be a PD entry pointing to a
              // Page Table; we will look for the entry with the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to Page Directories,
        // it should only have entries pointing to 1-GByte pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into the page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table is released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if no 4-KByte Page Table entry remains;
        // clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty PML4 table if no 1-GByte page entry remains;
        // clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
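
//
// Usage sketch (not part of the original source), mirroring the page-fault
// path below: a caller links a freshly allocated paging-structure page into
// its parent entry together with the encryption mask and attribute bits, e.g.
//
//   PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//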

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is the Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is the Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is the Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when different-size
      // page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
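
//
// Illustrative index walk (not part of the original source): each 9-bit slice
// of the faulting address selects one entry per level. Assuming, for example,
// PFAddress = 0x100000000 (4 GB) with a 2-MByte mapping (EndBit = 21) under
// 4-level paging:
//
//   PML4 index = BitFieldRead64 (PFAddress, 39, 47) = 0
//   PDPT index = BitFieldRead64 (PFAddress, 30, 38) = 4
//   PD   index = BitFieldRead64 (PFAddress, 21, 29) = 0
//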

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}
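
//
// Usage sketch (not part of the original source): SaveCr2 ()/RestoreCr2 ()
// are intended to bracket code that may itself take a page fault under
// on-demand paging, so a faulting address already latched in CR2 is not
// lost, e.g.
//
//   UINTN  Cr2 = 0;
//   SaveCr2 (&Cr2);
//   // ... work that may fault and overwrite CR2 ...
//   RestoreCr2 (Cr2);
//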

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}