/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES 8
#define ACC_MAX_BIT BIT3

LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE;
BOOLEAN mCpuSmmStaticPageTable;

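//
// Note on the paging strategy implemented in this file: when
// PcdCpuSmmStaticPageTable is TRUE, the entire supported address space is
// mapped up front (SetStaticPageTable) and the page tables themselves are
// later marked read-only (SetPageTableAttributes). When it is FALSE, only the
// first 4 GB are mapped eagerly; mPagePool keeps PAGE_TABLE_PAGES spare page
// table pages, the #PF handler maps faulting addresses on demand, and
// ReclaimPages() recycles the page table page with the smallest access
// record (an approximate LRU) once the pool is empty.
//
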
/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check whether 1-GByte pages are supported by the processor.

  @retval TRUE  1-GByte pages are supported.
  @retval FALSE 1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32 RegEax;
  UINT32 RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      //
      // CPUID.80000001H:EDX.Page1GB[bit 26] reports 1-GByte page support
      //
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Set the number of sub-entries in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entry number, zero-based:
                               0 means there is 1 sub-entry under this entry;
                               0x1ff means there are 512 sub-entries under this entry.

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64 *Entry,
  IN     UINT64 SubEntryNum
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the number of sub-entries in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entry number, zero-based:
          0 means there is 1 sub-entry under this entry;
          0x1ff means there are 512 sub-entries under this entry.
**/
UINT64
GetSubEntriesNum (
  IN UINT64 *Entry
  )
{
  //
  // The sub-entry number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

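//
// Layout of the software-defined bits this driver borrows in each page table
// entry (bits the CPU treats as ignored/available in these entry formats):
//
//   BIT9  - BIT11 : access record, aged by ReclaimPages() (SetAccNum/GetAccNum)
//   BIT52 - BIT60 : zero-based count of present sub-entries in the page
//                   pointed to by this entry (SetSubEntriesNum/GetSubEntriesNum)
//
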
/**
  Calculate the maximum number of supported physical address bits.

  @return The maximum number of supported physical address bits.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32 RegEax;
  UINT8  PhysicalAddressBits;
  VOID   *Hob;

  //
  // Get the number of physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}

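//
// Illustration: the CPU HOB takes precedence when present; otherwise
// CPUID.80000008H:EAX[7:0] reports the physical address width (39 and 46 are
// common values), with 36 as the fallback for processors lacking that leaf.
// A part reporting more than 48 bits is clamped to 48, since 4-level paging
// only translates a 48-bit linear address space.
//
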
/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN PageTable
  )
{
  UINT64 PageAddress;
  UINTN  NumberOfPml4EntriesNeeded;
  UINTN  NumberOfPdpEntriesNeeded;
  UINTN  IndexOfPml4Entries;
  UINTN  IndexOfPdpEntries;
  UINTN  IndexOfPageDirectoryEntries;
  UINT64 *PageMapLevel4Entry;
  UINT64 *PageMap;
  UINT64 *PageDirectoryPointerEntry;
  UINT64 *PageDirectory1GEntry;
  UINT64 *PageDirectoryEntry;

  if (mPhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT (PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Fill in the Page Directory entries
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries
          //
          continue;
        }
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT (PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

          //
          // Fill in the Page Directory Pointer entry
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}

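//
// Sizing note (illustrative arithmetic, not code): with mPhysicalAddressBits
// capped at 48 and no 1-GByte page support, the static table needs
// 2^(48-39) = 512 PML4 entries, each pointing at a PDPT page whose 512
// entries each point at a Page Directory page of 512 2-MByte mappings; with
// 1-GByte page support, the Page Directory level is skipped entirely.
//
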
/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS     Pages;
  UINT64                   *PTEntry;
  LIST_ENTRY               *FreePage;
  UINTN                    Index;
  UINTN                    PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
  EFI_STATUS               Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set the IA32_PG_PMNT bit to mask these entries so they are never reclaimed
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set the sub-entries number (3 means 4 sub-entries, the four PDPT entries
  // that map the first 4 GB)
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do single-stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

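//
// Note on the UINT32 return type above: the PML4 address is handed to the SMI
// entry stub, which loads it into CR3 before the switch to long mode
// completes; SMRAM, from which the page table pages are allocated, sits below
// 4 GB, so the truncation to 32 bits is assumed to be safe here.
//
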
/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64 *Entry,
  IN     UINT64 Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) of Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64 *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) of Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64 *Entry
  )
{
  UINT64 Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset the
    // access record to the initial value 7; adding ACC_MAX_BIT makes the returned
    // value larger than that of any entry whose access flag was already clear
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and update the field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

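//
// Walk-through of the aging scheme above (illustrative): an entry whose
// Accessed bit is set returns 7 + ACC_MAX_BIT = 15 and has its record reset
// to 7; an untouched entry with record 3 returns 3 and decays to 2. Repeated
// scans by ReclaimPages() therefore drive cold entries toward 0,
// approximating an LRU choice of which page table page to recycle.
//
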
/**
  Reclaim free pages for the PageFault handler.

  Search the whole page table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper-level entries also need to be
  inserted into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64  *Pml4;
  UINT64  *Pdpt;
  UINT64  *Pdt;
  UINTN   Pml4Index;
  UINTN   PdptIndex;
  UINTN   PdtIndex;
  UINTN   MinPml4;
  UINTN   MinPdpt;
  UINTN   MinPdt;
  UINT64  MinAcc;
  UINT64  Acc;
  UINT64  SubEntriesNum;
  BOOLEAN PML4EIgnore;
  BOOLEAN PDPTEIgnore;
  UINT64  *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = NULL;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, do not consider reclaiming the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a Page Directory;
        // do not consider reclaiming the PML4 entry any further
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, do not consider reclaiming the PDPT entry
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4-KByte Page Table;
            // look for the entry with the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte Page Tables,
          // it only has entries that map 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to Page Directories,
      // it only has entries that map 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory if it has no more 4-KByte Page Table
        // entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A Page Directory was released (either directly or just above), so check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory Pointer Table if it has no more
        // entries, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released page was pointed to directly by a PML4 entry; the PML4
    // table itself is never released, so exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64 RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64             *PageTable;
  UINT64             *Pml4;
  UINT64             PFAddress;
  UINTN              StartBit;
  UINTN              EndBit;
  UINT64             PTIndex;
  UINTN              Index;
  SMM_PAGE_SIZE_TYPE PageSize;
  UINTN              NumOfPages;
  UINTN              PageAttribute;
  EFI_STATUS         Status;
  UINT64             *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is the Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is the Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is the Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check whether the entry already exists; this issue may occur when
      // page entries of different sizes are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

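//
// Index-extraction example (illustrative): for a 2-MByte mapping, EndBit is
// 21, so for PFAddress = 0x140000000 (5 GB) the walk above reads bits 47:39
// = 0 (PML4 index) and bits 38:30 = 5 (PDPT index), then fills the leaf at
// bits 29:21 = 0 (Page Directory index), mapping the 2-MByte page at
// 0x140000000.
//
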
/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE InterruptType,
  IN EFI_SYSTEM_CONTEXT SystemContext
  )
{
  UINTN PFAddress;
  UINTN GuardPageAddress;
  UINTN CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be the SMM stack guard page
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
    );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN   Index2;
  UINTN   Index3;
  UINTN   Index4;
  UINT64  *L1PageTable;
  UINT64  *L2PageTable;
  UINT64  *L3PageTable;
  UINT64  *L4PageTable;
  BOOLEAN IsSplitted;
  BOOLEAN PageTableSplitted;
  BOOLEAN CetEnabled;

  //
  // Don't do this if
  //  - there is no static page table; or
  //  - the SMM heap guard feature is enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - the SMM profile feature is enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile cannot be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table itself read-only:
  // we must be able to *write* the page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          //
          // 1G page entry, no lower-level table to protect
          //
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            //
            // 2M page entry, no lower-level table to protect
            //
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table has been updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // Re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to a variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN *Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}