]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
c31160735a3790530058026df0a8009cc4be0d6d
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 #define PAGE_TABLE_PAGES 8
14 #define ACC_MAX_BIT BIT3
15
16 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
17 BOOLEAN m1GPageTableSupport = FALSE;
18 BOOLEAN mCpuSmmStaticPageTable;
19 BOOLEAN m5LevelPagingSupport;
20 X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;
21
/**
  Disable CET.

  Called by SetPageTableAttributes() before clearing CR0.WP, because CET
  must be disabled while WP is clear (see the WP-disable sequence there).
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.

  Called by SetPageTableAttributes() after CR0.WP is restored, to re-enable
  CET that DisableCet() turned off.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );
40 /**
41 Check if 1-GByte pages is supported by processor or not.
42
43 @retval TRUE 1-GByte pages is supported.
44 @retval FALSE 1-GByte pages is not supported.
45
46 **/
47 BOOLEAN
48 Is1GPageSupport (
49 VOID
50 )
51 {
52 UINT32 RegEax;
53 UINT32 RegEdx;
54
55 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
56 if (RegEax >= 0x80000001) {
57 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
58 if ((RegEdx & BIT26) != 0) {
59 return TRUE;
60 }
61 }
62 return FALSE;
63 }
64
65 /**
66 Check if 5-level paging is supported by processor or not.
67
68 @retval TRUE 5-level paging is supported.
69 @retval FALSE 5-level paging is not supported.
70
71 **/
72 BOOLEAN
73 Is5LevelPagingSupport (
74 VOID
75 )
76 {
77 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
78
79 AsmCpuidEx (
80 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
81 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
82 NULL,
83 NULL,
84 &EcxFlags.Uint32,
85 NULL
86 );
87 return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
88 }
89
90 /**
91 Set sub-entries number in entry.
92
93 @param[in, out] Entry Pointer to entry
94 @param[in] SubEntryNum Sub-entries number based on 0:
95 0 means there is 1 sub-entry under this entry
96 0x1ff means there is 512 sub-entries under this entry
97
98 **/
99 VOID
100 SetSubEntriesNum (
101 IN OUT UINT64 *Entry,
102 IN UINT64 SubEntryNum
103 )
104 {
105 //
106 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
107 //
108 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
109 }
110
111 /**
112 Return sub-entries number in entry.
113
114 @param[in] Entry Pointer to entry
115
116 @return Sub-entries number based on 0:
117 0 means there is 1 sub-entry under this entry
118 0x1ff means there is 512 sub-entries under this entry
119 **/
120 UINT64
121 GetSubEntriesNum (
122 IN UINT64 *Entry
123 )
124 {
125 //
126 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
127 //
128 return BitFieldRead64 (*Entry, 52, 60);
129 }
130
131 /**
132 Calculate the maximum support address.
133
134 @return the maximum support address.
135 **/
136 UINT8
137 CalculateMaximumSupportAddress (
138 VOID
139 )
140 {
141 UINT32 RegEax;
142 UINT8 PhysicalAddressBits;
143 VOID *Hob;
144
145 //
146 // Get physical address bits supported.
147 //
148 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
149 if (Hob != NULL) {
150 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
151 } else {
152 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
153 if (RegEax >= 0x80000008) {
154 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
155 PhysicalAddressBits = (UINT8) RegEax;
156 } else {
157 PhysicalAddressBits = 36;
158 }
159 }
160 return PhysicalAddressBits;
161 }
162
/**
  Set static page table.

  Identity-maps all physical memory above 4GB under the given top-level page
  table, using 1-GByte pages when supported and 2-MByte pages otherwise.
  Entries covering the first 4GB are skipped: they were created earlier (see
  SmmInitPageTable()) and are left untouched.

  @param[in] PageTable     Address of page table (PML4, or PML5 when 5-level
                           paging is enabled).
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                            PageAddress;
  UINTN                             NumberOfPml5EntriesNeeded;
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml5Entries;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel5Entry;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageMap;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  //
  // Compute how many entries are needed at each paging level. Each step
  // converts the address bits above one level's span into an entry count and
  // then caps mPhysicalAddressBits to that span for the next step.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingSupport) {
      // Reuse an existing PML4 page if this PML5 entry already points at one;
      // only allocate when the entry is still empty.
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        // Map with 1-GByte leaf pages directly in the PDPT.
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        // No 1-GByte page support: map with 2-MByte pages, starting above 4GB.
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entries points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in a Page Directory Pointer Entries
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
310
311 /**
312 Create PageTable for SMM use.
313
314 @return The address of PML4 (to set CR3).
315
316 **/
317 UINT32
318 SmmInitPageTable (
319 VOID
320 )
321 {
322 EFI_PHYSICAL_ADDRESS Pages;
323 UINT64 *PTEntry;
324 LIST_ENTRY *FreePage;
325 UINTN Index;
326 UINTN PageFaultHandlerHookAddress;
327 IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
328 EFI_STATUS Status;
329 UINT64 *Pml4Entry;
330 UINT64 *Pml5Entry;
331
332 //
333 // Initialize spin lock
334 //
335 InitializeSpinLock (mPFLock);
336
337 mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
338 m1GPageTableSupport = Is1GPageSupport ();
339 m5LevelPagingSupport = Is5LevelPagingSupport ();
340 mPhysicalAddressBits = CalculateMaximumSupportAddress ();
341 PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
342 DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
343 DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
344 DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
345 DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
346 //
347 // Generate PAE page table for the first 4GB memory space
348 //
349 Pages = Gen4GPageTable (FALSE);
350
351 //
352 // Set IA32_PG_PMNT bit to mask this entry
353 //
354 PTEntry = (UINT64*)(UINTN)Pages;
355 for (Index = 0; Index < 4; Index++) {
356 PTEntry[Index] |= IA32_PG_PMNT;
357 }
358
359 //
360 // Fill Page-Table-Level4 (PML4) entry
361 //
362 Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
363 ASSERT (Pml4Entry != NULL);
364 *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
365 ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
366
367 //
368 // Set sub-entries number
369 //
370 SetSubEntriesNum (Pml4Entry, 3);
371 PTEntry = Pml4Entry;
372
373 if (m5LevelPagingSupport) {
374 //
375 // Fill PML5 entry
376 //
377 Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
378 *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
379 ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
380 //
381 // Set sub-entries number
382 //
383 SetSubEntriesNum (Pml5Entry, 1);
384 PTEntry = Pml5Entry;
385 }
386
387 if (mCpuSmmStaticPageTable) {
388 SetStaticPageTable ((UINTN)PTEntry);
389 } else {
390 //
391 // Add pages to page pool
392 //
393 FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
394 ASSERT (FreePage != NULL);
395 for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
396 InsertTailList (&mPagePool, FreePage);
397 FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
398 }
399 }
400
401 if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
402 HEAP_GUARD_NONSTOP_MODE ||
403 NULL_DETECTION_NONSTOP_MODE) {
404 //
405 // Set own Page Fault entry instead of the default one, because SMM Profile
406 // feature depends on IRET instruction to do Single Step
407 //
408 PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
409 IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
410 IdtEntry += EXCEPT_IA32_PAGE_FAULT;
411 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
412 IdtEntry->Bits.Reserved_0 = 0;
413 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
414 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
415 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
416 IdtEntry->Bits.Reserved_1 = 0;
417 } else {
418 //
419 // Register Smm Page Fault Handler
420 //
421 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
422 ASSERT_EFI_ERROR (Status);
423 }
424
425 //
426 // Additional SMM IDT initialization for SMM stack guard
427 //
428 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
429 InitializeIDTSmmStackGuard ();
430 }
431
432 //
433 // Return the address of PML4/PML5 (to set CR3)
434 //
435 return (UINT32)(UINTN)PTEntry;
436 }
437
438 /**
439 Set access record in entry.
440
441 @param[in, out] Entry Pointer to entry
442 @param[in] Acc Access record value
443
444 **/
445 VOID
446 SetAccNum (
447 IN OUT UINT64 *Entry,
448 IN UINT64 Acc
449 )
450 {
451 //
452 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
453 //
454 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
455 }
456
457 /**
458 Return access record in entry.
459
460 @param[in] Entry Pointer to entry
461
462 @return Access record value.
463
464 **/
465 UINT64
466 GetAccNum (
467 IN UINT64 *Entry
468 )
469 {
470 //
471 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
472 //
473 return BitFieldRead64 (*Entry, 9, 11);
474 }
475
476 /**
477 Return and update the access record in entry.
478
479 @param[in, out] Entry Pointer to entry
480
481 @return Access record value.
482
483 **/
484 UINT64
485 GetAndUpdateAccNum (
486 IN OUT UINT64 *Entry
487 )
488 {
489 UINT64 Acc;
490
491 Acc = GetAccNum (Entry);
492 if ((*Entry & IA32_PG_A) != 0) {
493 //
494 // If this entry has been accessed, clear access flag in Entry and update access record
495 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
496 //
497 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
498 SetAccNum (Entry, 0x7);
499 return (0x7 + ACC_MAX_BIT);
500 } else {
501 if (Acc != 0) {
502 //
503 // If the access record is not the smallest value 0, minus 1 and update the access record field
504 //
505 SetAccNum (Entry, Acc - 1);
506 }
507 }
508 return Acc;
509 }
510
511 /**
512 Reclaim free pages for PageFault handler.
513
514 Search the whole entries tree to find the leaf entry that has the smallest
515 access record value. Insert the page pointed by this leaf entry into the
516 page pool. And check its upper entries if need to be inserted into the page
517 pool or not.
518
519 **/
520 VOID
521 ReclaimPages (
522 VOID
523 )
524 {
525 UINT64 Pml5Entry;
526 UINT64 *Pml5;
527 UINT64 *Pml4;
528 UINT64 *Pdpt;
529 UINT64 *Pdt;
530 UINTN Pml5Index;
531 UINTN Pml4Index;
532 UINTN PdptIndex;
533 UINTN PdtIndex;
534 UINTN MinPml5;
535 UINTN MinPml4;
536 UINTN MinPdpt;
537 UINTN MinPdt;
538 UINT64 MinAcc;
539 UINT64 Acc;
540 UINT64 SubEntriesNum;
541 BOOLEAN PML4EIgnore;
542 BOOLEAN PDPTEIgnore;
543 UINT64 *ReleasePageAddress;
544 IA32_CR4 Cr4;
545 BOOLEAN Enable5LevelPaging;
546
547 Pml4 = NULL;
548 Pdpt = NULL;
549 Pdt = NULL;
550 MinAcc = (UINT64)-1;
551 MinPml4 = (UINTN)-1;
552 MinPml5 = (UINTN)-1;
553 MinPdpt = (UINTN)-1;
554 MinPdt = (UINTN)-1;
555 Acc = 0;
556 ReleasePageAddress = 0;
557
558 Cr4.UintN = AsmReadCr4 ();
559 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
560 Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
561
562 if (!Enable5LevelPaging) {
563 //
564 // Create one fake PML5 entry for 4-Level Paging
565 // so that the page table parsing logic only handles 5-Level page structure.
566 //
567 Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
568 Pml5 = &Pml5Entry;
569 }
570
571 //
572 // First, find the leaf entry has the smallest access record value
573 //
574 for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
575 if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
576 //
577 // If the PML5 entry is not present or is masked, skip it
578 //
579 continue;
580 }
581 Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
582 for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
583 if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
584 //
585 // If the PML4 entry is not present or is masked, skip it
586 //
587 continue;
588 }
589 Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
590 PML4EIgnore = FALSE;
591 for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
592 if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
593 //
594 // If the PDPT entry is not present or is masked, skip it
595 //
596 if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
597 //
598 // If the PDPT entry is masked, we will ignore checking the PML4 entry
599 //
600 PML4EIgnore = TRUE;
601 }
602 continue;
603 }
604 if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
605 //
606 // It's not 1-GByte pages entry, it should be a PDPT entry,
607 // we will not check PML4 entry more
608 //
609 PML4EIgnore = TRUE;
610 Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
611 PDPTEIgnore = FALSE;
612 for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
613 if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
614 //
615 // If the PD entry is not present or is masked, skip it
616 //
617 if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
618 //
619 // If the PD entry is masked, we will not PDPT entry more
620 //
621 PDPTEIgnore = TRUE;
622 }
623 continue;
624 }
625 if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
626 //
627 // It's not 2 MByte page table entry, it should be PD entry
628 // we will find the entry has the smallest access record value
629 //
630 PDPTEIgnore = TRUE;
631 Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
632 if (Acc < MinAcc) {
633 //
634 // If the PD entry has the smallest access record value,
635 // save the Page address to be released
636 //
637 MinAcc = Acc;
638 MinPml5 = Pml5Index;
639 MinPml4 = Pml4Index;
640 MinPdpt = PdptIndex;
641 MinPdt = PdtIndex;
642 ReleasePageAddress = Pdt + PdtIndex;
643 }
644 }
645 }
646 if (!PDPTEIgnore) {
647 //
648 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
649 // it should only has the entries point to 2 MByte Pages
650 //
651 Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
652 if (Acc < MinAcc) {
653 //
654 // If the PDPT entry has the smallest access record value,
655 // save the Page address to be released
656 //
657 MinAcc = Acc;
658 MinPml5 = Pml5Index;
659 MinPml4 = Pml4Index;
660 MinPdpt = PdptIndex;
661 MinPdt = (UINTN)-1;
662 ReleasePageAddress = Pdpt + PdptIndex;
663 }
664 }
665 }
666 }
667 if (!PML4EIgnore) {
668 //
669 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
670 // it should only has the entries point to 1 GByte Pages
671 //
672 Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
673 if (Acc < MinAcc) {
674 //
675 // If the PML4 entry has the smallest access record value,
676 // save the Page address to be released
677 //
678 MinAcc = Acc;
679 MinPml5 = Pml5Index;
680 MinPml4 = Pml4Index;
681 MinPdpt = (UINTN)-1;
682 MinPdt = (UINTN)-1;
683 ReleasePageAddress = Pml4 + Pml4Index;
684 }
685 }
686 }
687 }
688 //
689 // Make sure one PML4/PDPT/PD entry is selected
690 //
691 ASSERT (MinAcc != (UINT64)-1);
692
693 //
694 // Secondly, insert the page pointed by this entry into page pool and clear this entry
695 //
696 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
697 *ReleasePageAddress = 0;
698
699 //
700 // Lastly, check this entry's upper entries if need to be inserted into page pool
701 // or not
702 //
703 while (TRUE) {
704 if (MinPdt != (UINTN)-1) {
705 //
706 // If 4 KByte Page Table is released, check the PDPT entry
707 //
708 Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
709 Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
710 SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
711 if (SubEntriesNum == 0) {
712 //
713 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
714 // clear the Page directory entry
715 //
716 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
717 Pdpt[MinPdpt] = 0;
718 //
719 // Go on checking the PML4 table
720 //
721 MinPdt = (UINTN)-1;
722 continue;
723 }
724 //
725 // Update the sub-entries filed in PDPT entry and exit
726 //
727 SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
728 break;
729 }
730 if (MinPdpt != (UINTN)-1) {
731 //
732 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
733 //
734 SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
735 if (SubEntriesNum == 0) {
736 //
737 // Release the empty PML4 table if there was no more 1G KByte Page Table entry
738 // clear the Page directory entry
739 //
740 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
741 Pml4[MinPml4] = 0;
742 MinPdpt = (UINTN)-1;
743 continue;
744 }
745 //
746 // Update the sub-entries filed in PML4 entry and exit
747 //
748 SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
749 break;
750 }
751 //
752 // PLM4 table has been released before, exit it
753 //
754 break;
755 }
756 }
757
758 /**
759 Allocate free Page for PageFault handler use.
760
761 @return Page address.
762
763 **/
764 UINT64
765 AllocPage (
766 VOID
767 )
768 {
769 UINT64 RetVal;
770
771 if (IsListEmpty (&mPagePool)) {
772 //
773 // If page pool is empty, reclaim the used pages and insert one into page pool
774 //
775 ReclaimPages ();
776 }
777
778 //
779 // Get one free page and remove it from page pool
780 //
781 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
782 RemoveEntryList (mPagePool.ForwardLink);
783 //
784 // Clean this page and return
785 //
786 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
787 return RetVal;
788 }
789
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) on demand: walks the page-table hierarchy
  from CR3, allocating intermediate tables from the page pool as needed, and
  installs NumOfPages leaf entries of the platform-selected page size
  (4K/2M/1G) covering the faulting region.
**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *PageTableTop;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;
  BOOLEAN                           Enable5LevelPaging;
  IA32_CR4                          Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  // Clamp out-of-range platform answers: unknown size falls back to 2M, and
  // at most 512 pages (one full table's worth) are mapped per fault.
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest linear-address bit translated by the leaf level;
  // the walk below stops one level above it.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = PageTableTop;
    UpperEntry = NULL;
    //
    // Walk down from the top level (bit 48 for 5-level paging, bit 39 for
    // 4-level) to the level just above the leaf, 9 index bits per level.
    //
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      // A new leaf was added under UpperEntry: bump its sub-entry count.
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
929
/**
  The Page Fault handler wrapper for SMM use.

  Classifies the fault (unsupported address, SMRAM stack guard / protection
  violation, non-SMRAM execution, NULL pointer, forbidden communication
  buffer) and either dead-loops, forwards to the guard-page / SMM-profile
  handlers, or maps the page on demand via SmiDefaultPFHandler().

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Serialize page-fault handling across all CPUs.
  //
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table nothing is mapped on demand, so a fault at or
  // above half of the supported physical address space is fatal.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    // Guard page sits one page above this CPU's stack base.
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      // IA32_PF_EC_ID set means the fault was on instruction fetch.
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  //
  // Not a fatal case: let SMM profile record the access, or map the page
  // on demand.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
1055
/**
  This function sets memory attribute for page table.

  Marks every page of the page-table hierarchy itself read-only, walking the
  hierarchy from CR3 down and repeating until no SmmSetMemoryAttributesEx()
  call reports a page split (a split allocates new page-table pages which
  must be protected too). Temporarily clears CR0.WP (and CET, which cannot
  stay enabled while WP is clear) so the page table can be modified while it
  is being write-protected.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINTN                 Index5;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  UINT64                *L5PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;
  BOOLEAN               CetEnabled;
  IA32_CR4              Cr4;
  BOOLEAN               Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        // 4-level paging: CR3 points directly at the single L4 table.
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet();
  }

  return ;
}
1195
1196 /**
1197 This function reads CR2 register when on-demand paging is enabled.
1198
1199 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
1200 **/
1201 VOID
1202 SaveCr2 (
1203 OUT UINTN *Cr2
1204 )
1205 {
1206 if (!mCpuSmmStaticPageTable) {
1207 *Cr2 = AsmReadCr2 ();
1208 }
1209 }
1210
1211 /**
1212 This function restores CR2 register when on-demand paging is enabled.
1213
1214 @param[in] Cr2 Value to write into CR2 register.
1215 **/
1216 VOID
1217 RestoreCr2 (
1218 IN UINTN Cr2
1219 )
1220 {
1221 if (!mCpuSmmStaticPageTable) {
1222 AsmWriteCr2 (Cr2);
1223 }
1224 }