]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpuDxeSmm: patch "gSmbase" with PatchInstructionX86()
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
b8caae19 4Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
427e3573
MK
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
//
// Number of 4 KByte pages pre-allocated into the page-fault page pool
// when static paging is not used (see SmmInitPageTable()).
//
#define PAGE_TABLE_PAGES            8
//
// Bias added to the access record of a just-accessed entry so it always
// ranks above entries whose A flag was clear (see GetAndUpdateAccNum()).
//
#define ACC_MAX_BIT                 BIT3

//
// Pool of free 4 KByte pages consumed by the page fault handler to build
// page-table entries on demand; replenished by ReclaimPages().
//
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
//
// TRUE if the processor supports 1-GByte pages; set by Is1GPageSupport().
//
BOOLEAN                             m1GPageTableSupport = FALSE;
//
// Cached value of PcdCpuSmmStaticPageTable; set in SmmInitPageTable().
//
BOOLEAN                             mCpuSmmStaticPageTable;
427e3573
MK
25\r
26/**\r
27 Check if 1-GByte pages is supported by processor or not.\r
28\r
29 @retval TRUE 1-GByte pages is supported.\r
30 @retval FALSE 1-GByte pages is not supported.\r
31\r
32**/\r
33BOOLEAN\r
34Is1GPageSupport (\r
35 VOID\r
36 )\r
37{\r
38 UINT32 RegEax;\r
39 UINT32 RegEdx;\r
40\r
41 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
42 if (RegEax >= 0x80000001) {\r
43 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
44 if ((RegEdx & BIT26) != 0) {\r
45 return TRUE;\r
46 }\r
47 }\r
48 return FALSE;\r
49}\r
50\r
51/**\r
52 Set sub-entries number in entry.\r
53\r
54 @param[in, out] Entry Pointer to entry\r
55 @param[in] SubEntryNum Sub-entries number based on 0:\r
56 0 means there is 1 sub-entry under this entry\r
57 0x1ff means there is 512 sub-entries under this entry\r
58\r
59**/\r
60VOID\r
61SetSubEntriesNum (\r
62 IN OUT UINT64 *Entry,\r
63 IN UINT64 SubEntryNum\r
64 )\r
65{\r
66 //\r
67 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
68 //\r
69 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
70}\r
71\r
72/**\r
73 Return sub-entries number in entry.\r
74\r
75 @param[in] Entry Pointer to entry\r
76\r
77 @return Sub-entries number based on 0:\r
78 0 means there is 1 sub-entry under this entry\r
79 0x1ff means there is 512 sub-entries under this entry\r
80**/\r
81UINT64\r
82GetSubEntriesNum (\r
83 IN UINT64 *Entry\r
84 )\r
85{\r
86 //\r
87 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
88 //\r
89 return BitFieldRead64 (*Entry, 52, 60);\r
90}\r
91\r
717fb604
JY
/**
  Calculate the maximum support address.

  The width is taken from the platform CPU HOB when available, otherwise
  queried from the processor via CPUID, and finally clamped to the 48-bit
  linear-address limit of 4-level IA-32e paging.

  @return the maximum support address (in bits).
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32                                        RegEax;
  UINT8                                         PhysicalAddressBits;
  VOID                                          *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    //
    // Prefer the platform-published CPU HOB value.
    //
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      //
      // CPUID.80000008H:EAX[7:0] reports the physical address width.
      //
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      //
      // Leaf not implemented: fall back to the 36-bit PAE minimum.
      //
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
  return PhysicalAddressBits;
}
131\r
132/**\r
133 Set static page table.\r
134\r
135 @param[in] PageTable Address of page table.\r
136**/\r
137VOID\r
138SetStaticPageTable (\r
139 IN UINTN PageTable\r
140 )\r
141{\r
142 UINT64 PageAddress;\r
143 UINTN NumberOfPml4EntriesNeeded;\r
144 UINTN NumberOfPdpEntriesNeeded;\r
145 UINTN IndexOfPml4Entries;\r
146 UINTN IndexOfPdpEntries;\r
147 UINTN IndexOfPageDirectoryEntries;\r
148 UINT64 *PageMapLevel4Entry;\r
149 UINT64 *PageMap;\r
150 UINT64 *PageDirectoryPointerEntry;\r
151 UINT64 *PageDirectory1GEntry;\r
152 UINT64 *PageDirectoryEntry;\r
153\r
154 if (mPhysicalAddressBits <= 39 ) {\r
155 NumberOfPml4EntriesNeeded = 1;\r
156 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));\r
157 } else {\r
158 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));\r
159 NumberOfPdpEntriesNeeded = 512;\r
160 }\r
161\r
162 //\r
163 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
164 //\r
165 PageMap = (VOID *) PageTable;\r
166\r
167 PageMapLevel4Entry = PageMap;\r
168 PageAddress = 0;\r
169 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
170 //\r
171 // Each PML4 entry points to a page of Page Directory Pointer entries.\r
172 //\r
241f9149 173 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);\r
717fb604
JY
174 if (PageDirectoryPointerEntry == NULL) {\r
175 PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
176 ASSERT(PageDirectoryPointerEntry != NULL);\r
177 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));\r
178\r
241f9149 179 *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
180 }\r
181\r
182 if (m1GPageTableSupport) {\r
183 PageDirectory1GEntry = PageDirectoryPointerEntry;\r
184 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
185 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {\r
186 //\r
187 // Skip the < 4G entries\r
188 //\r
189 continue;\r
190 }\r
191 //\r
192 // Fill in the Page Directory entries\r
193 //\r
241f9149 194 *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
195 }\r
196 } else {\r
197 PageAddress = BASE_4GB;\r
198 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
199 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {\r
200 //\r
201 // Skip the < 4G entries\r
202 //\r
203 continue;\r
204 }\r
205 //\r
206 // Each Directory Pointer entries points to a page of Page Directory entires.\r
207 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
208 //\r
241f9149 209 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);\r
717fb604
JY
210 if (PageDirectoryEntry == NULL) {\r
211 PageDirectoryEntry = AllocatePageTableMemory (1);\r
212 ASSERT(PageDirectoryEntry != NULL);\r
213 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));\r
214\r
215 //\r
216 // Fill in a Page Directory Pointer Entries\r
217 //\r
241f9149 218 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
219 }\r
220\r
221 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
222 //\r
223 // Fill in the Page Directory entries\r
224 //\r
241f9149 225 *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
226 }\r
227 }\r
228 }\r
229 }\r
230}\r
231\r
427e3573
MK
/**
  Create PageTable for SMM use.

  Builds the SMM CR3: a PML4 whose first entry points at the 4 GByte PAE
  mapping from Gen4GPageTable().  Depending on PcdCpuSmmStaticPageTable the
  rest of the address space is either fully mapped up front or faulted in
  on demand from a small page pool.  Also hooks the #PF vector.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock used to serialize SmiPFHandler() across CPUs.
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry: ReclaimPages() skips PMNT
  // entries, so the tables mapping the first 4G are never evicted.
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number (zero-based: 3 means the four PDPT entries of
  // the 4G mapping hang below this PML4 entry).
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    //
    // Map everything above 4G up front; no on-demand paging needed.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool for the on-demand page fault handler.
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
337\r
338/**\r
339 Set access record in entry.\r
340\r
341 @param[in, out] Entry Pointer to entry\r
342 @param[in] Acc Access record value\r
343\r
344**/\r
345VOID\r
346SetAccNum (\r
347 IN OUT UINT64 *Entry,\r
348 IN UINT64 Acc\r
349 )\r
350{\r
351 //\r
352 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
353 //\r
354 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
355}\r
356\r
357/**\r
358 Return access record in entry.\r
359\r
360 @param[in] Entry Pointer to entry\r
361\r
362 @return Access record value.\r
363\r
364**/\r
365UINT64\r
366GetAccNum (\r
367 IN UINT64 *Entry\r
368 )\r
369{\r
370 //\r
371 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
372 //\r
373 return BitFieldRead64 (*Entry, 9, 11);\r
374}\r
375\r
376/**\r
377 Return and update the access record in entry.\r
378\r
379 @param[in, out] Entry Pointer to entry\r
380\r
381 @return Access record value.\r
382\r
383**/\r
384UINT64\r
385GetAndUpdateAccNum (\r
386 IN OUT UINT64 *Entry\r
387 )\r
388{\r
389 UINT64 Acc;\r
390\r
391 Acc = GetAccNum (Entry);\r
392 if ((*Entry & IA32_PG_A) != 0) {\r
393 //\r
394 // If this entry has been accessed, clear access flag in Entry and update access record\r
395 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
396 //\r
397 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
398 SetAccNum (Entry, 0x7);\r
399 return (0x7 + ACC_MAX_BIT);\r
400 } else {\r
401 if (Acc != 0) {\r
402 //\r
403 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
404 //\r
405 SetAccNum (Entry, Acc - 1);\r
406 }\r
407 }\r
408 return Acc;\r
409}\r
410\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not 1-GByte pages entry, it should be a PDPT entry,
        // we will not check PML4 entry more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not 2 MByte page table entry, it should be PD entry
            // we will find the entry has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
          // it should only has the entries point to 2 MByte Pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If PML4 entry has no PDPT entry pointer to 2 MByte pages,
      // it should only has the entries point to 1 GByte Pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PLM4 table has been released before, exit it
    //
    break;
  }
}
625\r
626/**\r
627 Allocate free Page for PageFault handler use.\r
628\r
629 @return Page address.\r
630\r
631**/\r
632UINT64\r
633AllocPage (\r
634 VOID\r
635 )\r
636{\r
637 UINT64 RetVal;\r
638\r
639 if (IsListEmpty (&mPagePool)) {\r
640 //\r
641 // If page pool is empty, reclaim the used pages and insert one into page pool\r
642 //\r
643 ReclaimPages ();\r
644 }\r
645\r
646 //\r
647 // Get one free page and remove it from page pool\r
648 //\r
649 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
650 RemoveEntryList (mPagePool.ForwardLink);\r
651 //\r
652 // Clean this page and return\r
653 //\r
654 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
655 return RetVal;\r
656}\r
657\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address on demand by walking down from the PML4 to the
  level selected by the page size, allocating intermediate tables from the
  page pool as needed, then filling the leaf entry.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp platform-supplied values to sane bounds.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit selects the paging level of the leaf entry to create.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    //
    // Walk from PML4 (bits 39..47 of the address) down to the leaf level,
    // allocating any missing intermediate table from the page pool.
    //
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
792\r
/**
  The Page Fault handler wrapper for SMM use.

  Classifies the fault (unsupported address, SMRAM stack guard / protection
  violation, execution or forbidden access outside SMRAM, NULL pointer) and
  either dead-loops with diagnostics or forwards to the profile/default
  handler.  Serialized across CPUs by mPFLock.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table nothing is mapped on demand, so a fault at an
  // address beyond the mapped range is unrecoverable.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    //
    // NOTE(review): guard page is assumed to be the page just above each
    // CPU's stack base within the stack array - confirm against stack layout.
    //
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID set means the fault was an instruction fetch.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
    }
  }

  //
  // If NULL pointer was just accessed
  //
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
      (PFAddress < EFI_PAGE_SIZE)) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
    DEBUG_CODE (
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
    );
    CpuDeadLoop ();
  }

  //
  // Recoverable fault: forward to the profile handler or map the page in.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}
717fb604
JY
900\r
/**
  This function sets memory attribute for page table.

  Marks every level of the static page table read-only so the page tables
  protect themselves.  Setting an attribute may split a large page (which
  creates new page-table pages), so the walk repeats until no more splits
  occur.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    //
    // Walk L4 -> L3 -> L2 -> L1, marking each table page EFI_MEMORY_RO.
    // Large-page entries (IA32_PG_PS) have no lower table to protect.
    //
    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return ;
}