/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3

LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmStaticPageTable;

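//
// Note: mPagePool is an intrusive free list. A free page-table page is
// tracked by storing a LIST_ENTRY at the start of the page itself (see
// AllocPage () and ReclaimPages () below), so the pool needs no side metadata.
//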
/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}
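//
// Note: the check above follows the Intel SDM: CPUID leaf 80000001H reports
// 1-GByte page support in EDX bit 26 (Page1GB), and that leaf is queried
// only after CPUID.80000000H:EAX confirms it exists.
//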

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
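//
// Illustration (comment only, not executed): an upper-level entry covering
// three populated sub-entries would be tracked as
//
//   SetSubEntriesNum (&Entry, 2);       // zero-based, so 2 means 3 sub-entries
//   Count = GetSubEntriesNum (&Entry);  // reads back 2
//
// Bits 52..60 are ignored by the hardware page walker, so this bookkeeping
// rides along in the entry without affecting address translation.
//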

/**
  Calculate the maximum supported address.

  @return The maximum supported address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32                                        RegEax;
  UINT8                                         PhysicalAddressBits;
  VOID                                          *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}
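//
// Note: when no CPU HOB is present, CPUID leaf 80000008H:EAX[7:0] reports the
// number of physical address bits; processors without that leaf are assumed
// to support 36 bits. The result is capped at 48 because 4-level paging only
// translates 48-bit linear addresses.
//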

/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                                        PageAddress;
  UINTN                                         NumberOfPml4EntriesNeeded;
  UINTN                                         NumberOfPdpEntriesNeeded;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT64                                        *PageMapLevel4Entry;
  UINT64                                        *PageMap;
  UINT64                                        *PageDirectoryPointerEntry;
  UINT64                                        *PageDirectory1GEntry;
  UINT64                                        *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
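  //
  // Worked example (comment only): one PML4 entry spans 512 GBytes (2^39) and
  // one PDPT entry spans 1 GByte (2^30). With mPhysicalAddressBits == 36 this
  // yields 1 PML4 entry and 2^(36-30) == 64 PDPT entries; with 48 bits it
  // yields 2^(48-39) == 512 PML4 entries, each with a full 512-entry PDPT.
  //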

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in the Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR         *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);
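  //
  // Note: the sub-entries count is zero-based, so 3 records the 4 PDPT
  // entries created by Gen4GPageTable () to map the first 4 GBytes. Those
  // four entries also carry IA32_PG_PMNT (set above), so the reclaim logic
  // in ReclaimPages () never releases the below-4GB mappings.
  //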

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out]  Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64      *Entry
  )
{
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; adding ACC_MAX_BIT makes the
    // returned value larger than that of any non-accessed entry
    //
    *Entry &= ~(UINT64)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
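//
// Note: this implements a simple aging (pseudo-LRU) policy. A recently
// accessed entry restarts at record 7 and, with ACC_MAX_BIT added to the
// returned value, always compares as "hotter" than any idle entry; an idle
// entry's record decays toward 0 each time ReclaimPages () scans it, so the
// entry with the smallest value is the best candidate to release.
//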

/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. And check its upper entries if they need to be inserted into the
  page pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = NULL;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it should be a PDPT entry;
        // we will not check the PML4 entry any more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it should be a PD entry;
            // we will look for the entry that has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte pages,
          // it should only have entries pointing to 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
      // it should only have entries pointing to 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into
  // the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there are no more 4-KByte Page Table entries;
        // clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory Pointer table if there are no more 1-GByte page entries;
        // clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // A PML4 entry was released above; there is no upper entry to update, so exit
    //
    break;
  }
}
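//
// Note: entries whose IA32_PG_PMNT bit is set (the four PDPT entries covering
// the first 4 GBytes) are never selected, and the root PML4 page referenced by
// CR3 is never placed in the pool; only on-demand page-table pages are recycled.
//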

/**
  Allocate a free page for Page Fault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64          RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
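//
// Note: AllocPage () relies on ReclaimPages () always returning at least one
// page to the pool (its ASSERT checks that a releasable entry was found), so
// the list is non-empty when ForwardLink is consumed.
//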

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                        *PageTable;
  UINT64                        *Pml4;
  UINT64                        PFAddress;
  UINTN                         StartBit;
  UINTN                         EndBit;
  UINT64                        PTIndex;
  UINTN                         Index;
  SMM_PAGE_SIZE_TYPE            PageSize;
  UINTN                         NumOfPages;
  UINTN                         PageAttribute;
  EFI_STATUS                    Status;
  UINT64                        *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }
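  //
  // Illustration (comment only): the walk below starts at the PML4 level
  // (linear address bits 39..47) and descends 9 index bits per level. For a
  // 2-MByte mapping (EndBit == 21) it visits StartBit == 39 and 30, creating
  // the PML4 and PDPT entries on demand, and the loop ends with StartBit ==
  // 21 so the final leaf entry is written into the Page Directory.
  //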

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }
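  //
  // Note: with a static page table there is no on-demand mapping, so a fault
  // on an address beyond the pre-built range cannot be satisfied and the
  // handler dead-loops instead of falling through to SmiDefaultPFHandler ().
  //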

  //
  // If a page fault occurs in the SMRAM range, it might be the SMM stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
    }
  }

  //
  // If a NULL pointer was just accessed
  //
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
      (PFAddress < EFI_PAGE_SIZE)) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
    DEBUG_CODE (
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
    );
    CpuDeadLoop ();
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table read-only.
  // We need to *write* the page table memory in order to mark it *read-only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);
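  //
  // Note: marking a page-table page read-only may require splitting a large
  // mapping, which allocates new page-table pages that are themselves not yet
  // read-only. The loop above therefore repeats until a full pass completes
  // with no further splits.
  //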

  //
  // Enable write protection, after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return;
}