/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY  mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN     m1GPageTableSupport = FALSE;
UINT8       mPhysicalAddressBits;
BOOLEAN     mCpuSmmStaticPageTable;

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}
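
//
// Note: CPUID leaf 0x80000000 returns the highest supported extended leaf in
// EAX, and CPUID.80000001H:EDX[26] is the Page1GB feature flag. On a processor
// without 1-GByte page support, Is1GPageSupport() returns FALSE and the static
// page table built below falls back to 2-MByte mappings.
//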

/**
  Set the number of sub-entries in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the number of sub-entries in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
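
//
// Illustrative note for the two helpers above: bits 52..60 of a paging entry
// are normally ignored by the hardware page walker, so this driver reuses
// them as a 9-bit child counter. For example, after
// SetSubEntriesNum (&Entry, 0x1FF) the entry records 512 sub-entries (the
// count is 0-based), and GetSubEntriesNum (&Entry) returns 0x1FF without
// disturbing the attribute bits or the page-frame address.
//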

/**
  Calculate the maximum supported physical address.

  @return The number of supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}
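
//
// Worked example: on a processor reporting CPUID.80000008H:EAX[7:0] = 0x24,
// the function above yields 36 physical-address bits. A (hypothetical) report
// of 52 bits would be clamped to 48, since 4-level IA-32e paging can only map
// a 48-bit linear-address space.
//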

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in, in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in the Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
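
//
// Sizing sketch for SetStaticPageTable(), following the arithmetic above:
// with mPhysicalAddressBits = 48, NumberOfPml4EntriesNeeded =
// 1 << (48 - 39) = 512 and every PML4 entry gets a full PDPT of 512 entries.
// With 36 bits, a single PML4 entry suffices and NumberOfPdpEntriesNeeded =
// 1 << (36 - 30) = 64 PDPT entries, covering 64 GByte.
//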

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Map-Level-4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; adding ACC_MAX_BIT makes the
    // returned value larger than that of any non-accessed entry
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
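
//
// Aging example for GetAndUpdateAccNum(): an entry whose IA32_PG_A bit is set
// was referenced since the last sweep, so its record is reset to 7 and the
// function reports 7 + ACC_MAX_BIT (BIT3) = 15, the highest possible value.
// An entry that stays untouched is decremented on every sweep and drifts
// toward 0, making it the preferred victim for ReclaimPages() below.
//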

/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pdt;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdtIndex;
  UINTN    MinPml4;
  UINTN    MinPdpt;
  UINTN    MinPdt;
  UINT64   MinAcc;
  UINT64   Acc;
  UINT64   SubEntriesNum;
  BOOLEAN  PML4EIgnore;
  BOOLEAN  PDPTEIgnore;
  UINT64   *ReleasePageAddress;

  Pml4    = NULL;
  Pdpt    = NULL;
  Pdt     = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a Page Directory;
        // the PML4 entry will not be considered for release any more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, the PDPT entry will not be
              // considered for release any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4-KByte page
            // table; find the entry that has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
          // it only has entries that map 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to Page Directories,
      // it only has entries that map 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into
  // the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there are no more 4-KByte
        // page table entries under it, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page table or a Page Directory table was released, so check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT table if there are no more entries under it,
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released page was referenced directly by a PML4 entry; there is no
    // upper level to update, so exit
    //
    break;
  }
}
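
//
// Bookkeeping note: the sub-entries counter is 0-based, so reading 0 from the
// parent entry means the page just released was that parent's last child; the
// now-empty parent table is returned to mPagePool as well and the check
// cascades upward. Otherwise the counter is simply decremented and the walk
// stops.
//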

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *Pml4;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record,
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
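
//
// Worked example for the table walk above, assuming a 2-MByte mapping
// (EndBit = 21) and a hypothetical fault address: the first iteration
// (StartBit = 39) reads bits 39..47 of PFAddress as the PML4 index, the
// second (StartBit = 30) reads bits 30..38 as the PDPT index, and after the
// loop bits 21..29 select the PDE that receives the new 2-MByte mapping.
//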

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in the SMRAM range, it should be in an SMM stack guard page.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs outside the SMM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN    Index2;
  UINTN    Index3;
  UINTN    Index4;
  UINT64   *L1PageTable;
  UINT64   *L2PageTable;
  UINT64   *L3PageTable;
  UINT64   *L4PageTable;
  BOOLEAN  IsSplitted;
  BOOLEAN  PageTableSplitted;

  if (!mCpuSmmStaticPageTable) {
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself as
  // read-only: we must be able to *write* page table memory in order to mark
  // it *read-only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

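  //
  // Assumption behind the loop below: marking a region read-only may require
  // SmmSetMemoryAttributesEx() to split a large page, which allocates fresh
  // page-table pages that are not yet protected. IsSplitted reports such
  // splits, so the walk repeats until a full pass completes without any new
  // split.
  //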
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return;
}