/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES 8
#define ACC_MAX_BIT      BIT3
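//
// Each page table entry keeps a 3-bit access record in bits 9..11 (the
// field used by SetAccNum()/GetAccNum() below); GetAndUpdateAccNum() adds
// ACC_MAX_BIT (BIT3 = 8) on top of that 3-bit maximum so a just-accessed
// entry always outranks entries holding only a stored record.
//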

LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN    m1GPageTableSupport = FALSE;
BOOLEAN    mCpuSmmStaticPageTable;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE  1-GByte pages are supported.
  @retval FALSE 1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32 RegEax;
  UINT32 RegEdx;

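  //
  // CPUID.80000001h:EDX bit 26 (Page1GB) reports 1-GByte page support,
  // provided the extended leaf is implemented.
  //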
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Set the sub-entry number in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entry number, zero based:
                               0 means there is 1 sub-entry under this entry;
                               0x1ff means there are 512 sub-entries under this entry.

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64 *Entry,
  IN     UINT64 SubEntryNum
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) in Entry.
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the sub-entry number in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entry number, zero based:
          0 means there is 1 sub-entry under this entry;
          0x1ff means there are 512 sub-entries under this entry.
**/
UINT64
GetSubEntriesNum (
  IN UINT64 *Entry
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) in Entry.
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported physical address.

  @return The number of supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32 RegEax;
  UINT8  PhysicalAddressBits;
  VOID   *Hob;

  //
  // Get the number of physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
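      //
      // CPUID leaf 80000008h is not available; assume the architectural
      // 36-bit physical address minimum.
      //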
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}

/**
  Set the static page table.

  @param[in] PageTable  Address of the page table.
**/
VOID
SetStaticPageTable (
  IN UINTN PageTable
  )
{
  UINT64 PageAddress;
  UINTN  NumberOfPml4EntriesNeeded;
  UINTN  NumberOfPdpEntriesNeeded;
  UINTN  IndexOfPml4Entries;
  UINTN  IndexOfPdpEntries;
  UINTN  IndexOfPageDirectoryEntries;
  UINT64 *PageMapLevel4Entry;
  UINT64 *PageMap;
  UINT64 *PageDirectoryPointerEntry;
  UINT64 *PageDirectory1GEntry;
  UINT64 *PageDirectoryEntry;

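  //
  // One PML4 entry maps 512 GB (2^39 bytes): with 39 or fewer physical
  // address bits a single PML4 entry suffices and the number of PDP entries
  // (1 GB each) is derived from the remaining bits; otherwise a full page
  // of 512 PDP entries is needed per PML4 entry.
  //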
  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries,
        // so allocate space for them and fill them in, in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in a Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
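  // (IA32_PG_PMNT is a software-defined mask bit; ReclaimPages() skips
  // masked entries, so these below-4GB mappings are never reclaimed)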
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
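  // (3 means four sub-entries, since the field is zero based: the four
  // PDPT entries created by Gen4GPageTable() for the first 4 GB)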
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
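    // (each free page doubles as a LIST_ENTRY node at its start while it
    // sits in the pool)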
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register the SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for the SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of the PML4 table (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}


/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64 *Entry,
  IN     UINT64 Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry.
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64 *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry.
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64 *Entry
  )
{
  UINT64 Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access
    // record to the initial value 7; adding ACC_MAX_BIT makes it larger than any stored record
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrease it by 1 and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

/**
  Reclaim free pages for PageFault handler use.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted
  into the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64  *Pml4;
  UINT64  *Pdpt;
  UINT64  *Pdt;
  UINTN   Pml4Index;
  UINTN   PdptIndex;
  UINTN   PdtIndex;
  UINTN   MinPml4;
  UINTN   MinPdpt;
  UINTN   MinPdt;
  UINT64  MinAcc;
  UINT64  Acc;
  UINT64  SubEntriesNum;
  BOOLEAN PML4EIgnore;
  BOOLEAN PDPTEIgnore;
  UINT64  *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a Page Directory;
        // we will not need to check the PML4 entry any more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4-KByte page table;
            // we will look for the entry that has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
          // it should only have entries that point to 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
      // it should only have entries that point to 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there are no 4-KByte page
        // table entries left, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT table if there are no 1-GByte page table
        // entries left, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released page was pointed to directly from the PML4 table; there
    // are no further upper-level entries to check, so exit
    //
    break;
  }
}


/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64 RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64             *PageTable;
  UINT64             *Pml4;
  UINT64             PFAddress;
  UINTN              StartBit;
  UINTN              EndBit;
  UINT64             PTIndex;
  UINTN              Index;
  SMM_PAGE_SIZE_TYPE PageSize;
  UINTN              NumOfPages;
  UINTN              PageAttribute;
  EFI_STATUS         Status;
  UINT64             *UpperEntry;

  //
  // Set the default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
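  //
  // Cap the request at 512 pages: the number of entries in a single page
  // table (or directory) at the chosen level
  //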
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
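    //
    // Walk from the PML4 level (address bits 47:39) down to the level that
    // maps the requested page size, allocating any missing intermediate
    // tables from the page pool along the way
    //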
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries
      // of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE InterruptType,
  IN EFI_SYSTEM_CONTEXT SystemContext
  )
{
  UINTN PFAddress;
  UINTN GuardPageAddress;
  UINTN CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

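  //
  // Note: with a static page table the entire supported address space is
  // mapped up front, so a fault this high (at or above
  // 1 << (mPhysicalAddressBits - 1)) is presumably an address the mapping
  // cannot support, and is treated as fatal
  //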
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // The page fault occurred in a non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attribute for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN   Index2;
  UINTN   Index3;
  UINTN   Index4;
  UINT64  *L1PageTable;
  UINT64  *L2PageTable;
  UINT64  *L3PageTable;
  UINT64  *L4PageTable;
  BOOLEAN IsSplitted;
  BOOLEAN PageTableSplitted;
  BOOLEAN CetEnabled;

  //
  // Don't do this if
  //  - there is no static page table; or
  //  - the SMM heap guard feature is enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - the SMM profile feature is enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself
  // write protected: we must *write* page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

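  //
  // Marking a page-table page read-only may split a large page, which
  // allocates new page-table pages that must be marked read-only as well,
  // so repeat until a full pass performs no further splits
  //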
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN *Cr2
  )
{
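  //
  // Note: CR2 only needs to be preserved when on-demand paging is in use,
  // because SmiPFHandler may then take and service page faults inside SMM,
  // clobbering the interrupted context's CR2.
  //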
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}