/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3

LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
UINT8                               mPhysicalAddressBits;
BOOLEAN                             mCpuSmmStaticPageTable;

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

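  //
  // CPUID leaf 0x80000000 returns the highest extended leaf supported;
  // CPUID leaf 0x80000001, EDX bit 26 (Page1GB), reports 1-GByte page support.
  //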
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Set the sub-entries number in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the sub-entries number in an entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
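
//
// Example: an entry whose bits 52-60 hold the value 2 is tracking 3 sub-entries.
// These bits sit in a field that the MMU ignores for non-leaf paging entries,
// so the bookkeeping does not affect address translation.
//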

/**
  Calculate the maximum supported physical address.

  @return The maximum supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32                                        RegEax;
  UINT8                                         PhysicalAddressBits;
  VOID                                          *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}
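
//
// For example, a CPU HOB reporting SizeOfMemorySpace = 36 leads to a 64-GByte
// static identity mapping below, while CPUID.80000008H:EAX[7:0] = 48 (the
// clamp above) leads to a full 256-TByte mapping.
//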

/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                                        PageAddress;
  UINTN                                         NumberOfPml4EntriesNeeded;
  UINTN                                         NumberOfPdpEntriesNeeded;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT64                                        *PageMapLevel4Entry;
  UINT64                                        *PageMap;
  UINT64                                        *PageDirectoryPointerEntry;
  UINT64                                        *PageDirectory1GEntry;
  UINT64                                        *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
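  //
  // For example, 36 address bits need 1 PML4 entry and 64 PDPT entries
  // (1 << (36 - 30)), while 48 address bits need 512 PML4 entries
  // (1 << (48 - 39)), each with a full page of 512 PDPT entries.
  //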

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT (PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      //
      // Start above the 0-4 GByte range already covered by Gen4GPageTable ();
      // later PML4 entries continue from the running PageAddress so that the
      // identity mapping stays contiguous.
      //
      if (IndexOfPml4Entries == 0) {
        PageAddress = BASE_4GB;
      }
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT (PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

          //
          // Fill in a Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
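
//
// Note: SetStaticPageTable () deliberately skips the first four 1-GByte
// regions (the first four PDPT entries of PML4[0]), because Gen4GPageTable ()
// has already built the 0-4 GByte mapping before this function is called.
//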

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Map-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);
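  //
  // Note: 3 is zero-based, i.e. 4 sub-entries: the four PDPT entries that
  // Gen4GPageTable () created to map the first 4 GBytes.
  //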

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM
    // Profile feature depends on the IRET instruction to do single stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set the access record in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out]  Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64      *Entry
  )
{
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; ACC_MAX_BIT is added to the
    // return value so a just-accessed entry ranks above all others
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
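
//
// Aging example: an entry touched between page faults has IA32_PG_A set, so
// the next scan returns 7 + ACC_MAX_BIT (15) and resets its record to 7. If it
// is then left untouched, successive ReclaimPages () scans return 7, 6, 5, ...
// until it becomes the smallest-valued (least recently used) entry and is
// reclaimed.
//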

/**
  Reclaim free pages for PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries need to be inserted
  into the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, it should be a PDPT entry pointing to
        // a Page Directory; we will not check the PML4 entry any more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, it should be a PD entry pointing
            // to a 4-KByte page table; find the entry that has the smallest
            // access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
          // it should only have entries that point to 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If the PML4 entry has no PDPT entries pointing to Page Directories,
      // it should only have entries that point to 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory if it no longer has entries pointing
        // to 4-KByte page tables, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A Page Directory mapping 2-MByte pages was released, or a Page Directory
      // table was released above; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT if it no longer has any entries, and clear
        // the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4 table itself is never released, so exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64                            RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support a page table attribute, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record,
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page
      // entries of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
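
//
// Walk example for the default 2-MByte mapping (EndBit = 21): for a faulting
// address of 0x40200000, the loop above reads the PML4 index from bits 39-47
// (0), then the PDPT index from bits 30-38 (1); the final PTIndex is the Page
// Directory index from bits 21-29 (1), so the new 2-MByte leaf entry maps
// 0x40200000-0x403FFFFF.
//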

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      DEBUG ((DEBUG_ERROR, "SMM exception data - 0x%lx(", SystemContext.SystemContextX64->ExceptionData));
      DEBUG ((DEBUG_ERROR, "I:%x, R:%x, U:%x, W:%x, P:%x",
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_RSVD) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_US) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_WR) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_P) != 0
        ));
      DEBUG ((DEBUG_ERROR, ")\n"));
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs outside the SMRAM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attribute for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;

  if (!mCpuSmmStaticPageTable) {
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as read-only.
  // We need to *write* page table memory in order to mark it *read only*.
  //
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

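  //
  // Repeat the marking pass until no entry was split: applying EFI_MEMORY_RO
  // may split a large page, which allocates new page-table pages, and those
  // new pages must be marked read-only as well.
  //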
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);

  return;
}