]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpu: Correct exception message.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
//
// Number of pages pre-allocated into the page pool used by the dynamic
// (on-demand) page-table fault handler.
//
#define PAGE_TABLE_PAGES            8
//
// One bit above the largest 3-bit access-record value (7); added to a fresh
// record by GetAndUpdateAccNum() so just-accessed entries rank above all others.
//
#define ACC_MAX_BIT                 BIT3
// Pool of free 4KB pages used to build page tables on demand.
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
// TRUE when CPUID reports 1-GByte page support (set in SmmInitPageTable).
BOOLEAN                             m1GPageTableSupport = FALSE;
// Number of physical address bits to cover with the page table (capped at 48).
UINT8                               mPhysicalAddressBits;
// Cached PcdCpuSmmStaticPageTable: TRUE = build full static table up front.
BOOLEAN                             mCpuSmmStaticPageTable;
23
24 /**
25 Check if 1-GByte pages is supported by processor or not.
26
27 @retval TRUE 1-GByte pages is supported.
28 @retval FALSE 1-GByte pages is not supported.
29
30 **/
31 BOOLEAN
32 Is1GPageSupport (
33 VOID
34 )
35 {
36 UINT32 RegEax;
37 UINT32 RegEdx;
38
39 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
40 if (RegEax >= 0x80000001) {
41 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
42 if ((RegEdx & BIT26) != 0) {
43 return TRUE;
44 }
45 }
46 return FALSE;
47 }
48
49 /**
50 Set sub-entries number in entry.
51
52 @param[in, out] Entry Pointer to entry
53 @param[in] SubEntryNum Sub-entries number based on 0:
54 0 means there is 1 sub-entry under this entry
55 0x1ff means there is 512 sub-entries under this entry
56
57 **/
58 VOID
59 SetSubEntriesNum (
60 IN OUT UINT64 *Entry,
61 IN UINT64 SubEntryNum
62 )
63 {
64 //
65 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
66 //
67 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
68 }
69
70 /**
71 Return sub-entries number in entry.
72
73 @param[in] Entry Pointer to entry
74
75 @return Sub-entries number based on 0:
76 0 means there is 1 sub-entry under this entry
77 0x1ff means there is 512 sub-entries under this entry
78 **/
79 UINT64
80 GetSubEntriesNum (
81 IN UINT64 *Entry
82 )
83 {
84 //
85 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
86 //
87 return BitFieldRead64 (*Entry, 52, 60);
88 }
89
90 /**
91 Calculate the maximum support address.
92
93 @return the maximum support address.
94 **/
95 UINT8
96 CalculateMaximumSupportAddress (
97 VOID
98 )
99 {
100 UINT32 RegEax;
101 UINT8 PhysicalAddressBits;
102 VOID *Hob;
103
104 //
105 // Get physical address bits supported.
106 //
107 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
108 if (Hob != NULL) {
109 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
110 } else {
111 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
112 if (RegEax >= 0x80000008) {
113 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
114 PhysicalAddressBits = (UINT8) RegEax;
115 } else {
116 PhysicalAddressBits = 36;
117 }
118 }
119
120 //
121 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
122 //
123 ASSERT (PhysicalAddressBits <= 52);
124 if (PhysicalAddressBits > 48) {
125 PhysicalAddressBits = 48;
126 }
127 return PhysicalAddressBits;
128 }
129
130 /**
131 Set static page table.
132
133 @param[in] PageTable Address of page table.
134 **/
135 VOID
136 SetStaticPageTable (
137 IN UINTN PageTable
138 )
139 {
140 UINT64 PageAddress;
141 UINTN NumberOfPml4EntriesNeeded;
142 UINTN NumberOfPdpEntriesNeeded;
143 UINTN IndexOfPml4Entries;
144 UINTN IndexOfPdpEntries;
145 UINTN IndexOfPageDirectoryEntries;
146 UINT64 *PageMapLevel4Entry;
147 UINT64 *PageMap;
148 UINT64 *PageDirectoryPointerEntry;
149 UINT64 *PageDirectory1GEntry;
150 UINT64 *PageDirectoryEntry;
151
152 if (mPhysicalAddressBits <= 39 ) {
153 NumberOfPml4EntriesNeeded = 1;
154 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
155 } else {
156 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
157 NumberOfPdpEntriesNeeded = 512;
158 }
159
160 //
161 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
162 //
163 PageMap = (VOID *) PageTable;
164
165 PageMapLevel4Entry = PageMap;
166 PageAddress = 0;
167 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
168 //
169 // Each PML4 entry points to a page of Page Directory Pointer entries.
170 //
171 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
172 if (PageDirectoryPointerEntry == NULL) {
173 PageDirectoryPointerEntry = AllocatePageTableMemory (1);
174 ASSERT(PageDirectoryPointerEntry != NULL);
175 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
176
177 *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
178 }
179
180 if (m1GPageTableSupport) {
181 PageDirectory1GEntry = PageDirectoryPointerEntry;
182 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
183 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
184 //
185 // Skip the < 4G entries
186 //
187 continue;
188 }
189 //
190 // Fill in the Page Directory entries
191 //
192 *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
193 }
194 } else {
195 PageAddress = BASE_4GB;
196 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
197 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
198 //
199 // Skip the < 4G entries
200 //
201 continue;
202 }
203 //
204 // Each Directory Pointer entries points to a page of Page Directory entires.
205 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
206 //
207 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
208 if (PageDirectoryEntry == NULL) {
209 PageDirectoryEntry = AllocatePageTableMemory (1);
210 ASSERT(PageDirectoryEntry != NULL);
211 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
212
213 //
214 // Fill in a Page Directory Pointer Entries
215 //
216 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
217 }
218
219 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
220 //
221 // Fill in the Page Directory entries
222 //
223 *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
224 }
225 }
226 }
227 }
228 }
229
/**
  Create PageTable for SMM use.

  Builds the initial page table for SMM: a 4GB identity-mapped PAE region,
  one PML4 page pointing at it, then either a full static page table or a
  pool of free pages for on-demand fault-driven mapping. Also installs the
  page-fault handler (SMM Profile variant or the default SMI PF handler).

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR         *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry.
  // PMNT-masked entries are skipped by ReclaimPages(), so the first 4GB
  // mapping can never be reclaimed into the page pool.
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number: 3 (zero-based) records the 4 PDPT entries that
  // live under this first PML4 entry.
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool for the on-demand page-fault handler.
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3).
  // NOTE(review): truncated to UINT32 - assumes page-table memory is
  // allocated below 4GB; confirm against AllocatePageTableMemory().
  //
  return (UINT32)(UINTN)PTEntry;
}
335
336 /**
337 Set access record in entry.
338
339 @param[in, out] Entry Pointer to entry
340 @param[in] Acc Access record value
341
342 **/
343 VOID
344 SetAccNum (
345 IN OUT UINT64 *Entry,
346 IN UINT64 Acc
347 )
348 {
349 //
350 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
351 //
352 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
353 }
354
355 /**
356 Return access record in entry.
357
358 @param[in] Entry Pointer to entry
359
360 @return Access record value.
361
362 **/
363 UINT64
364 GetAccNum (
365 IN UINT64 *Entry
366 )
367 {
368 //
369 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
370 //
371 return BitFieldRead64 (*Entry, 9, 11);
372 }
373
374 /**
375 Return and update the access record in entry.
376
377 @param[in, out] Entry Pointer to entry
378
379 @return Access record value.
380
381 **/
382 UINT64
383 GetAndUpdateAccNum (
384 IN OUT UINT64 *Entry
385 )
386 {
387 UINT64 Acc;
388
389 Acc = GetAccNum (Entry);
390 if ((*Entry & IA32_PG_A) != 0) {
391 //
392 // If this entry has been accessed, clear access flag in Entry and update access record
393 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
394 //
395 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
396 SetAccNum (Entry, 0x7);
397 return (0x7 + ACC_MAX_BIT);
398 } else {
399 if (Acc != 0) {
400 //
401 // If the access record is not the smallest value 0, minus 1 and update the access record field
402 //
403 SetAccNum (Entry, Acc - 1);
404 }
405 }
406 return Acc;
407 }
408
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  // (UINTN)-1 / (UINT64)-1 act as "not found yet" sentinels below.
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value.
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked (PMNT), skip it.
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it.
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // A masked PDPT entry must stay, so its parent PML4 entry can
          // never be reclaimed either - ignore checking the PML4 entry.
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a Page Directory;
        // the PML4 entry has live children and will not be a candidate.
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it.
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // A masked PD entry likewise pins its parent PDPT entry -
              // ignore checking the PDPT entry.
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4KB Page
            // Table - that Page Table page is a reclaim candidate; track
            // the entry with the smallest access record value.
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released.
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // This PDPT entry has no PD entries pointing to 4KB Page Tables;
          // it only has entries pointing to 2-MByte pages, so the whole
          // Page Directory page is itself a reclaim candidate.
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released.
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // This PML4 entry has no PDPT entries pointing to Page Directories;
      // it only has entries pointing to 1-GByte pages, so the whole PDPT
      // page is itself a reclaim candidate.
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released.
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected.
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry.
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not.
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4KB Page Table was released, check its parent PDPT entry.
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4KB Page Table entry;
        // clear the Page directory entry.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table.
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit.
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2MB Page Table or a Page Directory table was released; check the PML4 entry.
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT page if the PML4 entry has no more children;
        // clear the PML4 entry.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit.
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4-level page has been released before; exit.
    //
    break;
  }
}
623
624 /**
625 Allocate free Page for PageFault handler use.
626
627 @return Page address.
628
629 **/
630 UINT64
631 AllocPage (
632 VOID
633 )
634 {
635 UINT64 RetVal;
636
637 if (IsListEmpty (&mPagePool)) {
638 //
639 // If page pool is empty, reclaim the used pages and insert one into page pool
640 //
641 ReclaimPages ();
642 }
643
644 //
645 // Get one free page and remove it from page pool
646 //
647 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
648 RemoveEntryList (mPagePool.ForwardLink);
649 //
650 // Clean this page and return
651 //
652 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
653 return RetVal;
654 }
655
/**
  Page Fault handler for SMM use.

  Creates identity-mapped page-table entries on demand for the faulting
  address read from CR2, using the page size suggested by the platform
  (GetPlatformPageTableAttribute) or 2MB by default. Intermediate table
  pages come from AllocPage(); access records on the walked entries are
  refreshed so ReclaimPages() prefers colder entries.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                        *PageTable;
  UINT64                        *Pml4;
  UINT64                        PFAddress;
  UINTN                         StartBit;
  UINTN                         EndBit;
  UINT64                        PTIndex;
  UINTN                         Index;
  SMM_PAGE_SIZE_TYPE            PageSize;
  UINTN                         NumOfPages;
  UINTN                         PageAttribute;
  EFI_STATUS                    Status;
  UINT64                        *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp platform-provided values to sane bounds: a known page size and at
  // most one full directory's worth (512) of pages.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest linear-address bit covered by the leaf entry, i.e.
  // the page-walk stops at the level whose index field starts at EndBit.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    //
    // Walk from PML4 (index bits 39..47) down to the level above the leaf,
    // allocating missing intermediate table pages from the pool.
    //
    PageTable = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new leaf entry: page-aligned faulting address plus the
    // attributes computed above.
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      // A new leaf was added: bump the parent's sub-entry count.
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
790
/**
  The Page Fault handler wrapper for SMM use.

  Distinguishes three cases: unsupported addresses (dead loop), faults inside
  SMRAM (stack overflow or protection violation - logged, then dead loop),
  and faults outside SMRAM (instruction fetch after lock is fatal; otherwise
  dispatched to SmmProfilePFHandler or SmiDefaultPFHandler).

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  // Serializes all page-fault handling across CPUs; released at the end.
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table nothing is mapped on demand, so an address at
  // or above this bound cannot be handled.
  // NOTE(review): the bound uses (mPhysicalAddressBits - 1), not
  // mPhysicalAddressBits - presumably related to canonical-address limits;
  // confirm intent.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    CpuIndex = GetCpuIndex ();
    // Guard page sits one page above this CPU's slot in the stack array.
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // Decode the page-fault error code bits for the log:
      // I=instruction fetch, R=reserved-bit, U=user, W=write, P=protection.
      //
      DEBUG ((DEBUG_ERROR, "SMM exception data - 0x%lx(", SystemContext.SystemContextX64->ExceptionData));
      DEBUG ((DEBUG_ERROR, "I:%x, R:%x, U:%x, W:%x, P:%x",
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_RSVD) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_US) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_WR) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_P) != 0
        ));
      DEBUG ((DEBUG_ERROR, ")\n"));
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          // On an instruction fetch fault, the return address on the stack
          // identifies the caller of the bad target.
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      // Executing code outside SMRAM from SMM is fatal once SMM is locked.
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  //
  // Recoverable fault: hand off to the profile handler or the default
  // demand-mapping handler.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}
883
884 /**
885 This function sets memory attribute for page table.
886 **/
887 VOID
888 SetPageTableAttributes (
889 VOID
890 )
891 {
892 UINTN Index2;
893 UINTN Index3;
894 UINTN Index4;
895 UINT64 *L1PageTable;
896 UINT64 *L2PageTable;
897 UINT64 *L3PageTable;
898 UINT64 *L4PageTable;
899 BOOLEAN IsSplitted;
900 BOOLEAN PageTableSplitted;
901
902 if (!mCpuSmmStaticPageTable) {
903 return ;
904 }
905
906 DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
907
908 //
909 // Disable write protection, because we need mark page table to be write protected.
910 // We need *write* page table memory, to mark itself to be *read only*.
911 //
912 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
913
914 do {
915 DEBUG ((DEBUG_INFO, "Start...\n"));
916 PageTableSplitted = FALSE;
917
918 L4PageTable = (UINT64 *)GetPageTableBase ();
919 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
920 PageTableSplitted = (PageTableSplitted || IsSplitted);
921
922 for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
923 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
924 if (L3PageTable == NULL) {
925 continue;
926 }
927
928 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
929 PageTableSplitted = (PageTableSplitted || IsSplitted);
930
931 for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
932 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
933 // 1G
934 continue;
935 }
936 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
937 if (L2PageTable == NULL) {
938 continue;
939 }
940
941 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
942 PageTableSplitted = (PageTableSplitted || IsSplitted);
943
944 for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
945 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
946 // 2M
947 continue;
948 }
949 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
950 if (L1PageTable == NULL) {
951 continue;
952 }
953 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
954 PageTableSplitted = (PageTableSplitted || IsSplitted);
955 }
956 }
957 }
958 } while (PageTableSplitted);
959
960 //
961 // Enable write protection, after page table updated.
962 //
963 AsmWriteCr0 (AsmReadCr0() | CR0_WP);
964
965 return ;
966 }