]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
9cee784156eb0338e647d62464591e6319bd6110
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
1 /** @file
2 Page Fault (#PF) handler for X64 processors
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
//
// Number of 4-KByte pages reserved for the on-demand page-table page pool.
//
#define PAGE_TABLE_PAGES 8
//
// Added to a just-accessed entry's access record so it always outranks any
// aged (never-reset) record, which is at most 7.
//
#define ACC_MAX_BIT BIT3
//
// Pool of free 4-KByte pages used to build page-table pages on demand;
// AllocPage () pops from it and ReclaimPages () refills it.
//
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
//
// TRUE when the processor reports 1-GByte page support (set in SmmInitPageTable).
//
BOOLEAN m1GPageTableSupport = FALSE;
21
22 /**
23 Check if 1-GByte pages is supported by processor or not.
24
25 @retval TRUE 1-GByte pages is supported.
26 @retval FALSE 1-GByte pages is not supported.
27
28 **/
29 BOOLEAN
30 Is1GPageSupport (
31 VOID
32 )
33 {
34 UINT32 RegEax;
35 UINT32 RegEdx;
36
37 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
38 if (RegEax >= 0x80000001) {
39 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
40 if ((RegEdx & BIT26) != 0) {
41 return TRUE;
42 }
43 }
44 return FALSE;
45 }
46
47 /**
48 Set sub-entries number in entry.
49
50 @param[in, out] Entry Pointer to entry
51 @param[in] SubEntryNum Sub-entries number based on 0:
52 0 means there is 1 sub-entry under this entry
53 0x1ff means there is 512 sub-entries under this entry
54
55 **/
56 VOID
57 SetSubEntriesNum (
58 IN OUT UINT64 *Entry,
59 IN UINT64 SubEntryNum
60 )
61 {
62 //
63 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
64 //
65 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
66 }
67
68 /**
69 Return sub-entries number in entry.
70
71 @param[in] Entry Pointer to entry
72
73 @return Sub-entries number based on 0:
74 0 means there is 1 sub-entry under this entry
75 0x1ff means there is 512 sub-entries under this entry
76 **/
77 UINT64
78 GetSubEntriesNum (
79 IN UINT64 *Entry
80 )
81 {
82 //
83 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
84 //
85 return BitFieldRead64 (*Entry, 52, 60);
86 }
87
/**
  Create PageTable for SMM use.

  Builds the identity-mapped page tables covering the first 4 GB, seeds the
  free-page pool used by the page-fault handler, and installs either the SMM
  Profile page-fault IDT entry or the default SMM page-fault handler.

  @return The address of PML4 (to set CR3). Returned as UINT32, so the PML4
          page must reside below 4 GB.

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS          Pages;
  UINT64                        *PTEntry;
  LIST_ENTRY                    *FreePage;
  UINTN                         Index;
  UINTN                         PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR      *IdtEntry;

  //
  // Initialize the spin lock serializing SmiPFHandler across CPUs.
  //
  InitializeSpinLock (mPFLock);

  m1GPageTableSupport = Is1GPageSupport ();
  //
  // Generate page tables for the first 4GB memory space.
  // NOTE(review): the extra-page count passed in appears to make
  // Gen4GPageTable place PAGE_TABLE_PAGES + 1 additional pages immediately
  // below the returned address - TODO confirm against Gen4GPageTable.
  //
  Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE);

  //
  // Set IA32_PG_PMNT on the four PDPT entries covering 0-4GB so that
  // ReclaimPages never frees these permanent mappings.
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry: the PML4 page is the first of the
  // extra pages below the 4GB tables; its entry 0 points at the PDPT.
  //
  PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
  *PTEntry = Pages + PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
  //
  // Set sub-entries number (3 means 4 sub-entries, matching the 4 PDPT
  // entries linked above; the count is stored 0-based).
  //
  SetSubEntriesNum (PTEntry, 3);

  //
  // Add the remaining extra pages (between the PML4 page and the 4GB
  // tables) to the free-page pool for on-demand page-table allocation.
  //
  FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
  while ((UINTN)FreePage < Pages) {
    InsertTailList (&mPagePool, FreePage);
    FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
178
179 /**
180 Set access record in entry.
181
182 @param[in, out] Entry Pointer to entry
183 @param[in] Acc Access record value
184
185 **/
186 VOID
187 SetAccNum (
188 IN OUT UINT64 *Entry,
189 IN UINT64 Acc
190 )
191 {
192 //
193 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
194 //
195 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
196 }
197
198 /**
199 Return access record in entry.
200
201 @param[in] Entry Pointer to entry
202
203 @return Access record value.
204
205 **/
206 UINT64
207 GetAccNum (
208 IN UINT64 *Entry
209 )
210 {
211 //
212 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
213 //
214 return BitFieldRead64 (*Entry, 9, 11);
215 }
216
217 /**
218 Return and update the access record in entry.
219
220 @param[in, out] Entry Pointer to entry
221
222 @return Access record value.
223
224 **/
225 UINT64
226 GetAndUpdateAccNum (
227 IN OUT UINT64 *Entry
228 )
229 {
230 UINT64 Acc;
231
232 Acc = GetAccNum (Entry);
233 if ((*Entry & IA32_PG_A) != 0) {
234 //
235 // If this entry has been accessed, clear access flag in Entry and update access record
236 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
237 //
238 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
239 SetAccNum (Entry, 0x7);
240 return (0x7 + ACC_MAX_BIT);
241 } else {
242 if (Acc != 0) {
243 //
244 // If the access record is not the smallest value 0, minus 1 and update the access record field
245 //
246 SetAccNum (Entry, Acc - 1);
247 }
248 }
249 return Acc;
250 }
251
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value.
  // An entry with IA32_PG_PMNT set is pinned and never reclaimed; a pinned
  // descendant also protects its ancestors (the *Ignore flags below).
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, its parent PML4 entry must not be
          // reclaimed either, so skip scoring the PML4 entry below.
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // PS clear: this PDPT entry references a Page Directory rather than
        // mapping a 1-GByte page, so the PML4 entry itself is not a
        // reclaim candidate; descend into the PD instead.
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, its parent PDPT entry must not be
              // reclaimed either, so skip scoring the PDPT entry below.
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // PS clear: this PD entry references a 4-KByte Page Table rather
            // than mapping a 2-MByte page - it is a leaf candidate, so score
            // it and remember it if it has the smallest access record so far.
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // This PDPT entry's PD holds only 2-MByte page mappings (no child
          // page tables and nothing pinned), so the PDPT entry itself is a
          // leaf candidate; score it the same way.
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // This PML4 entry's PDPT holds only 1-GByte page mappings (no child
      // page directories and nothing pinned), so the PML4 entry itself is a
      // leaf candidate; score it the same way.
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, walk upward: decrement the parent's sub-entry count, and if the
  // parent table has become empty, release it into the pool as well and
  // repeat one level higher.
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // A 4-KByte Page Table was released; re-derive the parent PDPT and
      // inspect the owning PD's remaining sub-entry count.
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Count 0 (0-based) means the released PT was the last child: free
        // the now-empty Page Directory page, clear the PDPT entry that
        // pointed to it, then continue checking at the PML4 level.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A Page Directory (or a PDPT leaf entry) was released; inspect the
      // owning PML4 entry's remaining sub-entry count.
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Count 0 means the released child was the last one: free the
        // now-empty PDPT page and clear the PML4 entry that pointed to it.
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released entry was at the PML4 level itself; nothing above it.
    //
    break;
  }
}
466
467 /**
468 Allocate free Page for PageFault handler use.
469
470 @return Page address.
471
472 **/
473 UINT64
474 AllocPage (
475 VOID
476 )
477 {
478 UINT64 RetVal;
479
480 if (IsListEmpty (&mPagePool)) {
481 //
482 // If page pool is empty, reclaim the used pages and insert one into page pool
483 //
484 ReclaimPages ();
485 }
486
487 //
488 // Get one free page and remove it from page pool
489 //
490 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
491 RemoveEntryList (mPagePool.ForwardLink);
492 //
493 // Clean this page and return
494 //
495 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
496 return RetVal;
497 }
498
/**
  Page Fault handler for SMM use.

  Reads the faulting address from CR2 and builds the missing paging-structure
  entries on demand, mapping NumOfPages pages of the platform-selected size
  (4 KByte / 2 MByte / 1 GByte) starting at the fault address.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                        *PageTable;
  UINT64                        *Pml4;
  UINT64                        PFAddress;
  UINTN                         StartBit;
  UINTN                         EndBit;
  UINT64                        PTIndex;
  UINTN                         Index;
  SMM_PAGE_SIZE_TYPE            PageSize;
  UINTN                         NumOfPages;
  UINTN                         PageAttribute;
  EFI_STATUS                    Status;
  UINT64                        *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform does not support page table attributes, fall back to the
  // default SMM page attribute set above.
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp platform-supplied values to sane bounds: a valid page-size enum
  // and at most 512 pages (one full table) per fault.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest address bit covered by the leaf entry; the walk
  // below stops one level above it.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    //
    // Walk from the PML4 level (bits 39..47) down toward the leaf level,
    // allocating any missing intermediate table from the page pool.
    //
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new leaf entry: identity-map the fault address rounded down
    // to the page-size boundary, with the accumulated attributes.
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    //
    // A newly filled leaf increments the parent's sub-entry count (only
    // tracked when the parent already existed; a freshly allocated parent
    // starts its count at the default).
    //
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
633
/**
  The Page Fault handler wrapper for SMM use.

  Serializes page-fault handling across CPUs with mPFLock, dead-loops on
  fatal conditions (SMM stack overflow, code fetch outside SMRAM), and
  dispatches to either the SMM Profile handler or the default on-demand
  page-table builder.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is
                          processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Only one CPU manipulates the shared page tables at a time.
  //
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With stack guard enabled, SMRAM is fully mapped, so a fault inside the
  // SMRR range can only be a hit on a stack guard page: fatal stack overflow.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // A fault outside the SMRR range with the instruction-fetch error-code bit
  // set means SMM code jumped outside SMRAM: fatal. (Data accesses outside
  // SMRAM fall through and get mapped on demand.)
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}