/** @file
  Page Fault (#PF) handler for X64 processors

  Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD License
  which accompanies this distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY  mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN     m1GPageTableSupport = FALSE;
BOOLEAN     mCpuSmmStaticPageTable;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Set the number of sub-entries in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entry number, 0-based:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the number of sub-entries in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entry number, 0-based:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
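
//
// Illustrative sketch (not part of the driver) of how the sub-entry
// bookkeeping is used: a hypothetical Page Directory entry whose page holds
// 512 present entries records the 0-based count 0x1FF in its reserved bits,
// and reading it back yields the same value. PageDirectoryAddress is an
// assumed placeholder here.
//
//   UINT64  Entry;
//
//   Entry = PageDirectoryAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//   SetSubEntriesNum (&Entry, 0x1FF);             // 512 sub-entries, 0-based
//   ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
//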

/**
  Calculate the maximum supported address.

  @return the maximum supported address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get the number of supported physical address bits.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}
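
//
// Example (values assumed for illustration, not from a specific platform):
// a processor reporting 46 in EAX[7:0] for CPUID leaf 0x80000008 yields
// PhysicalAddressBits == 46, while a report of 52 is clamped to 48 because
// 4-level IA-32e paging only translates a 48-bit linear address space, so
// the static page table never needs to map more than that.
//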

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in a Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
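
//
// Worked example of the sizing arithmetic above (values assumed for
// illustration): with mPhysicalAddressBits == 48, 2^(48 - 39) == 512 PML4
// entries are needed, each covering 512 GB; with mPhysicalAddressBits == 36,
// a single PML4 entry suffices and 2^(36 - 30) == 64 PDPT entries cover the
// 64 GB of addressable memory.
//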

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask these entries
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Map-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);
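  //
  // Note: 3 is the 0-based sub-entry count, i.e. the four Page Directory
  // Pointer entries created by Gen4GPageTable above to map the first 4 GB.
  //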

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do single-stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; ACC_MAX_BIT is added to make the
    // returned value larger than that of any entry that has not been accessed
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
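
//
// Worked example of the aging scheme above (values assumed for illustration):
// an entry with IA32_PG_A set returns 0x7 + ACC_MAX_BIT == 0xF and has its
// record reset to 7, so recently touched entries always outrank untouched
// ones; an untouched entry whose record is 5 returns 5 and decays to 4.
// ReclaimPages below releases the page behind the entry with the smallest
// returned value, approximating least-recently-used reclamation.
//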

/**
  Reclaim free pages for the PageFault handler.

  Search the whole page table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted into
  the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pdt;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdtIndex;
  UINTN    MinPml4;
  UINTN    MinPdpt;
  UINTN    MinPdt;
  UINT64   MinAcc;
  UINT64   Acc;
  UINT64   SubEntriesNum;
  BOOLEAN  PML4EIgnore;
  BOOLEAN  PDPTEIgnore;
  UINT64   *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will not check the PML4 entry further
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a Page Directory;
        // we will not consider the PML4 entry for release
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry further
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4-KByte page table;
            // we will look for the entry that has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
          // it should only have entries pointing to 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to Page Directories,
      // it should only have entries pointing to 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Second, insert the page pointed to by this entry into the page pool and clear the entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Last, check whether this entry's upper-level entries need to be inserted
  // into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory if it has no more 4-KByte page table
        // entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page table or a Page Directory was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT if it has no more 1-GByte page or Page Directory
        // entries, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released page was referenced directly by a PML4 entry; there are no
    // upper-level entries to update, so exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
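
//
// Illustrative use (mirroring the page-fault path below): a newly allocated
// page becomes the next level of the walk once its physical address is
// combined with the encryption mask and the default attribute bits.
//
//   PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//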

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *Pml4;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, use the default SMM page attributes
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
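    //
    // Walk the paging hierarchy from the PML4 level down to the level that
    // maps the requested page size. For example (2-MByte mapping, EndBit 21),
    // the loop extracts the PML4 index from bits 39..47 of PFAddress and the
    // PDPT index from bits 30..38, creating any missing intermediate tables;
    // the Page Directory entry itself is filled in after the loop.
    //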
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page
      // entries of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
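    //
    // The mask ~((1ull << EndBit) - 1) below aligns PFAddress down to the page
    // size being mapped; for example, EndBit == 21 clears the low 21 bits,
    // giving 2-MByte alignment.
    //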
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is
                        processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;
  BOOLEAN  CetEnabled;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself
  // read-only: we must be able to *write* page table memory in order to mark
  // it *read-only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);
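
  //
  // The do/while loop above repeats because marking a region read-only may
  // require splitting a large page, which allocates new page-table pages;
  // those new pages must themselves be walked and marked read-only, so the
  // walk runs again until a full pass completes with no further splits.
  //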

  //
  // Re-enable write protection after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet();
  }

  return;
}