]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpuDxeSmm: Add paging protection.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
fe3a75bc 4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
427e3573
MK
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
//
// Number of pages pre-allocated for the on-demand page-table page pool.
//
#define PAGE_TABLE_PAGES 8
//
// Added to a just-accessed entry's access record so it always outranks
// records that were only aged down (see GetAndUpdateAccNum).
//
#define ACC_MAX_BIT BIT3
//
// Free-page pool consumed by the SMM page-fault handler when it builds
// page-table pages on demand (non-static page table mode only).
//
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
// TRUE if the processor supports 1-GByte pages (CPUID 80000001h, EDX bit 26).
BOOLEAN m1GPageTableSupport = FALSE;
// Maximum supported physical-address width, capped at 48 for 4-level paging.
UINT8 mPhysicalAddressBits;
// Cached PcdCpuSmmStaticPageTable: TRUE = pre-build the whole page table.
BOOLEAN mCpuSmmStaticPageTable;
427e3573
MK
23\r
24/**\r
25 Check if 1-GByte pages is supported by processor or not.\r
26\r
27 @retval TRUE 1-GByte pages is supported.\r
28 @retval FALSE 1-GByte pages is not supported.\r
29\r
30**/\r
31BOOLEAN\r
32Is1GPageSupport (\r
33 VOID\r
34 )\r
35{\r
36 UINT32 RegEax;\r
37 UINT32 RegEdx;\r
38\r
39 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
40 if (RegEax >= 0x80000001) {\r
41 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
42 if ((RegEdx & BIT26) != 0) {\r
43 return TRUE;\r
44 }\r
45 }\r
46 return FALSE;\r
47}\r
48\r
49/**\r
50 Set sub-entries number in entry.\r
51\r
52 @param[in, out] Entry Pointer to entry\r
53 @param[in] SubEntryNum Sub-entries number based on 0:\r
54 0 means there is 1 sub-entry under this entry\r
55 0x1ff means there is 512 sub-entries under this entry\r
56\r
57**/\r
58VOID\r
59SetSubEntriesNum (\r
60 IN OUT UINT64 *Entry,\r
61 IN UINT64 SubEntryNum\r
62 )\r
63{\r
64 //\r
65 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
66 //\r
67 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
68}\r
69\r
70/**\r
71 Return sub-entries number in entry.\r
72\r
73 @param[in] Entry Pointer to entry\r
74\r
75 @return Sub-entries number based on 0:\r
76 0 means there is 1 sub-entry under this entry\r
77 0x1ff means there is 512 sub-entries under this entry\r
78**/\r
79UINT64\r
80GetSubEntriesNum (\r
81 IN UINT64 *Entry\r
82 )\r
83{\r
84 //\r
85 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
86 //\r
87 return BitFieldRead64 (*Entry, 52, 60);\r
88}\r
89\r
717fb604
JY
90/**\r
91 Calculate the maximum support address.\r
92\r
93 @return the maximum support address.\r
94**/\r
95UINT8\r
96CalculateMaximumSupportAddress (\r
97 VOID\r
98 )\r
99{\r
100 UINT32 RegEax;\r
101 UINT8 PhysicalAddressBits;\r
102 VOID *Hob;\r
103\r
104 //\r
105 // Get physical address bits supported.\r
106 //\r
107 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
108 if (Hob != NULL) {\r
109 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
110 } else {\r
111 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
112 if (RegEax >= 0x80000008) {\r
113 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
114 PhysicalAddressBits = (UINT8) RegEax;\r
115 } else {\r
116 PhysicalAddressBits = 36;\r
117 }\r
118 }\r
119\r
120 //\r
121 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.\r
122 //\r
123 ASSERT (PhysicalAddressBits <= 52);\r
124 if (PhysicalAddressBits > 48) {\r
125 PhysicalAddressBits = 48;\r
126 }\r
127 return PhysicalAddressBits;\r
128}\r
129\r
130/**\r
131 Set static page table.\r
132\r
133 @param[in] PageTable Address of page table.\r
134**/\r
135VOID\r
136SetStaticPageTable (\r
137 IN UINTN PageTable\r
138 )\r
139{\r
140 UINT64 PageAddress;\r
141 UINTN NumberOfPml4EntriesNeeded;\r
142 UINTN NumberOfPdpEntriesNeeded;\r
143 UINTN IndexOfPml4Entries;\r
144 UINTN IndexOfPdpEntries;\r
145 UINTN IndexOfPageDirectoryEntries;\r
146 UINT64 *PageMapLevel4Entry;\r
147 UINT64 *PageMap;\r
148 UINT64 *PageDirectoryPointerEntry;\r
149 UINT64 *PageDirectory1GEntry;\r
150 UINT64 *PageDirectoryEntry;\r
151\r
152 if (mPhysicalAddressBits <= 39 ) {\r
153 NumberOfPml4EntriesNeeded = 1;\r
154 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));\r
155 } else {\r
156 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));\r
157 NumberOfPdpEntriesNeeded = 512;\r
158 }\r
159\r
160 //\r
161 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
162 //\r
163 PageMap = (VOID *) PageTable;\r
164\r
165 PageMapLevel4Entry = PageMap;\r
166 PageAddress = 0;\r
167 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
168 //\r
169 // Each PML4 entry points to a page of Page Directory Pointer entries.\r
170 //\r
171 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);\r
172 if (PageDirectoryPointerEntry == NULL) {\r
173 PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
174 ASSERT(PageDirectoryPointerEntry != NULL);\r
175 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));\r
176\r
177 *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
178 }\r
179\r
180 if (m1GPageTableSupport) {\r
181 PageDirectory1GEntry = PageDirectoryPointerEntry;\r
182 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
183 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {\r
184 //\r
185 // Skip the < 4G entries\r
186 //\r
187 continue;\r
188 }\r
189 //\r
190 // Fill in the Page Directory entries\r
191 //\r
192 *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
193 }\r
194 } else {\r
195 PageAddress = BASE_4GB;\r
196 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
197 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {\r
198 //\r
199 // Skip the < 4G entries\r
200 //\r
201 continue;\r
202 }\r
203 //\r
204 // Each Directory Pointer entries points to a page of Page Directory entires.\r
205 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
206 //\r
207 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);\r
208 if (PageDirectoryEntry == NULL) {\r
209 PageDirectoryEntry = AllocatePageTableMemory (1);\r
210 ASSERT(PageDirectoryEntry != NULL);\r
211 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));\r
212\r
213 //\r
214 // Fill in a Page Directory Pointer Entries\r
215 //\r
216 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;\r
217 }\r
218\r
219 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
220 //\r
221 // Fill in the Page Directory entries\r
222 //\r
223 *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
224 }\r
225 }\r
226 }\r
227 }\r
228}\r
229\r
427e3573
MK
/**
  Create PageTable for SMM use.

  Builds the SMM CR3: a PAE-style set of four PDPT entries identity-mapping
  the first 4GB, hung under a single new PML4 page.  Depending on
  PcdCpuSmmStaticPageTable, either the entire supported address space is
  mapped up front (SetStaticPageTable) or a small page pool is reserved so
  the page-fault handler can build mappings on demand.  Also installs the
  SMM page-fault handler (profile hook or SmiPFHandler) and, if enabled,
  the stack-guard IDT setup.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  // (the four entries covering 0-4GB must never be evicted by ReclaimPages).
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number
  // (3 means 4 sub-entries: the four PDPT entries installed above).
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  // NOTE(review): truncation to UINT32 presumably relies on page-table
  // memory being allocated below 4GB -- confirm AllocatePageTableMemory.
  //
  return (UINT32)(UINTN)PTEntry;
}
333\r
334/**\r
335 Set access record in entry.\r
336\r
337 @param[in, out] Entry Pointer to entry\r
338 @param[in] Acc Access record value\r
339\r
340**/\r
341VOID\r
342SetAccNum (\r
343 IN OUT UINT64 *Entry,\r
344 IN UINT64 Acc\r
345 )\r
346{\r
347 //\r
348 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
349 //\r
350 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
351}\r
352\r
353/**\r
354 Return access record in entry.\r
355\r
356 @param[in] Entry Pointer to entry\r
357\r
358 @return Access record value.\r
359\r
360**/\r
361UINT64\r
362GetAccNum (\r
363 IN UINT64 *Entry\r
364 )\r
365{\r
366 //\r
367 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
368 //\r
369 return BitFieldRead64 (*Entry, 9, 11);\r
370}\r
371\r
372/**\r
373 Return and update the access record in entry.\r
374\r
375 @param[in, out] Entry Pointer to entry\r
376\r
377 @return Access record value.\r
378\r
379**/\r
380UINT64\r
381GetAndUpdateAccNum (\r
382 IN OUT UINT64 *Entry\r
383 )\r
384{\r
385 UINT64 Acc;\r
386\r
387 Acc = GetAccNum (Entry);\r
388 if ((*Entry & IA32_PG_A) != 0) {\r
389 //\r
390 // If this entry has been accessed, clear access flag in Entry and update access record\r
391 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
392 //\r
393 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
394 SetAccNum (Entry, 0x7);\r
395 return (0x7 + ACC_MAX_BIT);\r
396 } else {\r
397 if (Acc != 0) {\r
398 //\r
399 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
400 //\r
401 SetAccNum (Entry, Acc - 1);\r
402 }\r
403 }\r
404 return Acc;\r
405}\r
406\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

  Entries with IA32_PG_PMNT set are permanent (e.g. the 0-4GB mappings) and
  are never reclaimed.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;     // indices of the victim entry's walk path
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;      // smallest access record seen so far
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not 1-GByte pages entry, it should be a PDPT entry,
        // we will not check PML4 entry more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not PDPT entry more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not 2 MByte page table entry, it should be PD entry
            // we will find the entry has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
          // it should only has the entries point to 2 MByte Pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
      // it should only has the entries point to 1 GByte Pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries filed in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries filed in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PLM4 table has been released before, exit it
    //
    break;
  }
}
621\r
622/**\r
623 Allocate free Page for PageFault handler use.\r
624\r
625 @return Page address.\r
626\r
627**/\r
628UINT64\r
629AllocPage (\r
630 VOID\r
631 )\r
632{\r
633 UINT64 RetVal;\r
634\r
635 if (IsListEmpty (&mPagePool)) {\r
636 //\r
637 // If page pool is empty, reclaim the used pages and insert one into page pool\r
638 //\r
639 ReclaimPages ();\r
640 }\r
641\r
642 //\r
643 // Get one free page and remove it from page pool\r
644 //\r
645 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
646 RemoveEntryList (mPagePool.ForwardLink);\r
647 //\r
648 // Clean this page and return\r
649 //\r
650 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
651 return RetVal;\r
652}\r
653\r
/**
  Page Fault handler for SMM use.

  Builds the missing page-table mapping for the faulting address (CR2)
  on demand, allocating intermediate table pages from the page pool.
  The page size (4K/2M/1G), count and attributes may be overridden by
  the platform via GetPlatformPageTableAttribute.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp platform-supplied values to sane ranges: a valid page-size
  // enum, and at most one full table's worth (512) of pages.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest linear-address bit translated by the leaf level,
  // i.e. log2 of the mapped page size.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    //
    // Walk from the PML4 down to the level just above the leaf,
    // creating intermediate table pages from the pool as needed.
    //
    PageTable = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
788\r
/**
  ThePage Fault handler wrapper for SMM use.

  Serializes page-fault handling across CPUs with mPFLock, dead-loops on
  faults that indicate real errors (address beyond the supported range in
  static-page-table mode, SMM stack overflow with stack guard enabled, or
  instruction fetch outside SMRAM), and otherwise dispatches to either the
  SMM-profile handler or the on-demand mapping handler.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table every valid address is pre-mapped, so a fault
  // at or above this boundary is a hard error.
  // NOTE(review): the boundary uses (mPhysicalAddressBits - 1), i.e. half
  // the supported range -- presumably to also catch sign-extended
  // (canonical upper-half) addresses cheaply; confirm intent.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    //
    // IA32_PF_EC_ID set means the fault was an instruction fetch: code is
    // being executed outside SMRAM, which is never legitimate once locked.
    //
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}
717fb604
JY
852\r
/**
  This function sets memory attribute for page table.

  Marks every page-table page (PML4 down to 4K page tables) of the static
  SMM page table as read-only, so the page table protects itself.  Marking
  may split large pages into smaller ones, which creates new page-table
  pages; the scan therefore repeats until a full pass causes no further
  splits.  No-op unless the static page table is in use.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;           // a large page was split during this call
  BOOLEAN               PageTableSplitted;    // any split seen in the current pass

  if (!mCpuSmmStaticPageTable) {
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G page: leaf entry, no lower-level table page to protect
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M page: leaf entry, no lower-level table page to protect
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return ;
}