]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg PiSmmCpuDxeSmm: Update SmiEntry function run the same position
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
b8caae19 4Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
427e3573
MK
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
//
// Number of 4-KByte pages pre-allocated into mPagePool for the on-demand
// (non-static) page-table mode; the page-fault handler draws from this pool.
//
#define PAGE_TABLE_PAGES 8
//
// Added to an access record (see GetAndUpdateAccNum) so that an entry whose
// Accessed flag was set ranks above any entry that was merely aged.
//
#define ACC_MAX_BIT BIT3

// Free-list of 4-KByte pages used to build page-table entries on demand.
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
// TRUE when the processor supports 1-GByte pages (CPUID.80000001H:EDX[26]).
BOOLEAN m1GPageTableSupport = FALSE;
// Cached PcdCpuSmmStaticPageTable: TRUE = full static paging, FALSE = on-demand.
BOOLEAN mCpuSmmStaticPageTable;
427e3573
MK
25\r
26/**\r
27 Check if 1-GByte pages is supported by processor or not.\r
28\r
29 @retval TRUE 1-GByte pages is supported.\r
30 @retval FALSE 1-GByte pages is not supported.\r
31\r
32**/\r
33BOOLEAN\r
34Is1GPageSupport (\r
35 VOID\r
36 )\r
37{\r
38 UINT32 RegEax;\r
39 UINT32 RegEdx;\r
40\r
41 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
42 if (RegEax >= 0x80000001) {\r
43 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
44 if ((RegEdx & BIT26) != 0) {\r
45 return TRUE;\r
46 }\r
47 }\r
48 return FALSE;\r
49}\r
50\r
51/**\r
52 Set sub-entries number in entry.\r
53\r
54 @param[in, out] Entry Pointer to entry\r
55 @param[in] SubEntryNum Sub-entries number based on 0:\r
56 0 means there is 1 sub-entry under this entry\r
57 0x1ff means there is 512 sub-entries under this entry\r
58\r
59**/\r
60VOID\r
61SetSubEntriesNum (\r
62 IN OUT UINT64 *Entry,\r
63 IN UINT64 SubEntryNum\r
64 )\r
65{\r
66 //\r
67 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
68 //\r
69 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
70}\r
71\r
72/**\r
73 Return sub-entries number in entry.\r
74\r
75 @param[in] Entry Pointer to entry\r
76\r
77 @return Sub-entries number based on 0:\r
78 0 means there is 1 sub-entry under this entry\r
79 0x1ff means there is 512 sub-entries under this entry\r
80**/\r
81UINT64\r
82GetSubEntriesNum (\r
83 IN UINT64 *Entry\r
84 )\r
85{\r
86 //\r
87 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
88 //\r
89 return BitFieldRead64 (*Entry, 52, 60);\r
90}\r
91\r
717fb604
JY
92/**\r
93 Calculate the maximum support address.\r
94\r
95 @return the maximum support address.\r
96**/\r
97UINT8\r
98CalculateMaximumSupportAddress (\r
99 VOID\r
100 )\r
101{\r
102 UINT32 RegEax;\r
103 UINT8 PhysicalAddressBits;\r
104 VOID *Hob;\r
105\r
106 //\r
107 // Get physical address bits supported.\r
108 //\r
109 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
110 if (Hob != NULL) {\r
111 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
112 } else {\r
113 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
114 if (RegEax >= 0x80000008) {\r
115 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
116 PhysicalAddressBits = (UINT8) RegEax;\r
117 } else {\r
118 PhysicalAddressBits = 36;\r
119 }\r
120 }\r
121\r
122 //\r
123 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.\r
124 //\r
125 ASSERT (PhysicalAddressBits <= 52);\r
126 if (PhysicalAddressBits > 48) {\r
127 PhysicalAddressBits = 48;\r
128 }\r
129 return PhysicalAddressBits;\r
130}\r
131\r
/**
  Set static page table.

  Builds a static identity mapping for all supported physical memory above
  4 GByte under the given PML4. The range below 4 GByte is skipped here
  because it is already mapped by the table produced in SmmInitPageTable.
  Uses 1-GByte pages when the processor supports them, 2-MByte pages
  otherwise.

  @param[in] PageTable   Address of page table (the PML4 page).
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                            PageAddress;
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageMap;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;

  //
  // Work out how many PML4/PDPT entries span the supported physical address
  // space. One PML4 entry covers 512 GByte (2^39); one PDPT entry 1 GByte (2^30).
  // mPhysicalAddressBits is capped at 48 by CalculateMaximumSupportAddress.
  //
  if (mPhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // Reuse the PDPT already referenced by this PML4 entry, if any;
    // otherwise allocate a fresh zeroed page for it.
    //
    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
    if (PageDirectoryPointerEntry == NULL) {
      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
      ASSERT(PageDirectoryPointerEntry != NULL);
      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    }

    if (m1GPageTableSupport) {
      PageDirectory1GEntry = PageDirectoryPointerEntry;
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
          //
          // Skip the < 4G entries (already mapped by the page table
          // built earlier in SmmInitPageTable).
          //
          continue;
        }
        //
        // Fill in the Page Directory entries (1-GByte pages).
        //
        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
      }
    } else {
      //
      // No 1-GByte page support: map with 2-MByte pages, starting above 4 GByte.
      //
      PageAddress = BASE_4GB;
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
          //
          // Skip the < 4G entries (already mapped by the page table
          // built earlier in SmmInitPageTable).
          //
          continue;
        }
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
        if (PageDirectoryEntry == NULL) {
          PageDirectoryEntry = AllocatePageTableMemory (1);
          ASSERT(PageDirectoryEntry != NULL);
          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

          //
          // Fill in a Page Directory Pointer Entry.
          //
          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries (2-MByte pages).
          //
          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      }
    }
  }
}
231\r
427e3573
MK
/**
  Create PageTable for SMM use.

  Builds the SMM page table: a 4-GByte identity map plus, when
  PcdCpuSmmStaticPageTable is set, a full static map of the rest of the
  supported address space; otherwise a pool of free pages is prepared so
  the page-fault handler can extend the table on demand. Also installs the
  SMM #PF handler appropriate for the enabled features.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry, so the four PDPT entries
  // covering 0-4GB are never selected for reclaim by ReclaimPages().
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number (3 means 4 sub-entries: the four PDPT entries
  // that map the first 4 GByte).
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    //
    // Static mode: map the whole supported address space up front.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // On-demand mode: add pages to page pool for the #PF handler to consume.
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
339\r
340/**\r
341 Set access record in entry.\r
342\r
343 @param[in, out] Entry Pointer to entry\r
344 @param[in] Acc Access record value\r
345\r
346**/\r
347VOID\r
348SetAccNum (\r
349 IN OUT UINT64 *Entry,\r
350 IN UINT64 Acc\r
351 )\r
352{\r
353 //\r
354 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
355 //\r
356 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
357}\r
358\r
359/**\r
360 Return access record in entry.\r
361\r
362 @param[in] Entry Pointer to entry\r
363\r
364 @return Access record value.\r
365\r
366**/\r
367UINT64\r
368GetAccNum (\r
369 IN UINT64 *Entry\r
370 )\r
371{\r
372 //\r
373 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
374 //\r
375 return BitFieldRead64 (*Entry, 9, 11);\r
376}\r
377\r
378/**\r
379 Return and update the access record in entry.\r
380\r
381 @param[in, out] Entry Pointer to entry\r
382\r
383 @return Access record value.\r
384\r
385**/\r
386UINT64\r
387GetAndUpdateAccNum (\r
388 IN OUT UINT64 *Entry\r
389 )\r
390{\r
391 UINT64 Acc;\r
392\r
393 Acc = GetAccNum (Entry);\r
394 if ((*Entry & IA32_PG_A) != 0) {\r
395 //\r
396 // If this entry has been accessed, clear access flag in Entry and update access record\r
397 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
398 //\r
399 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
400 SetAccNum (Entry, 0x7);\r
401 return (0x7 + ACC_MAX_BIT);\r
402 } else {\r
403 if (Acc != 0) {\r
404 //\r
405 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
406 //\r
407 SetAccNum (Entry, Acc - 1);\r
408 }\r
409 }\r
410 return Acc;\r
411}\r
412\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value.
  // Non-leaf entries (those with children that are themselves page tables)
  // are only candidates when none of their children are; the *EIgnore flags
  // track that.
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte pages entry, it should be a PDPT entry;
        // we will not check the PML4 entry any more.
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more.
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page table entry, it should be a PD entry;
            // we will find the entry that has the smallest access record value.
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
          // it should only have entries that point to 2-MByte pages.
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entry pointing to 2-MByte pages,
      // it should only have entries that point to 1-GByte pages.
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4-KByte Page Table entry;
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1-GByte Page Table entry;
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4 table entry has been released before; exit.
    //
    break;
  }
}
627\r
628/**\r
629 Allocate free Page for PageFault handler use.\r
630\r
631 @return Page address.\r
632\r
633**/\r
634UINT64\r
635AllocPage (\r
636 VOID\r
637 )\r
638{\r
639 UINT64 RetVal;\r
640\r
641 if (IsListEmpty (&mPagePool)) {\r
642 //\r
643 // If page pool is empty, reclaim the used pages and insert one into page pool\r
644 //\r
645 ReclaimPages ();\r
646 }\r
647\r
648 //\r
649 // Get one free page and remove it from page pool\r
650 //\r
651 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
652 RemoveEntryList (mPagePool.ForwardLink);\r
653 //\r
654 // Clean this page and return\r
655 //\r
656 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
657 return RetVal;\r
658}\r
659\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) by creating page-table entries on demand,
  drawing pages from mPagePool via AllocPage(). The page size and number of
  pages to map may be customized by the platform through
  GetPlatformPageTableAttribute(); otherwise a single 2-MByte page is used.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                        *PageTable;
  UINT64                        *Pml4;
  UINT64                        PFAddress;
  UINTN                         StartBit;
  UINTN                         EndBit;
  UINT64                        PTIndex;
  UINTN                         Index;
  SMM_PAGE_SIZE_TYPE            PageSize;
  UINTN                         NumOfPages;
  UINTN                         PageAttribute;
  EFI_STATUS                    Status;
  UINT64                        *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp platform-provided values to sane bounds.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest linear-address bit translated by the leaf entry;
  // it selects which paging level the new leaf is created at.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    //
    // Walk down from PML4 (index bits start at BIT39), creating any missing
    // intermediate tables from the page pool, until the leaf level (EndBit).
    //
    PageTable = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
794\r
/**
  The Page Fault handler wrapper for SMM use.

  Classifies the faulting address (CR2): faults inside SMRAM indicate stack
  overflow or SMM page-protection violations; faults outside SMRAM indicate
  illegal code execution, NULL-pointer access, or forbidden communication
  buffer access. Recoverable faults are delegated to SmmProfilePFHandler /
  SmiDefaultPFHandler; fatal ones dead-loop after dumping CPU context.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Serialize all page-fault handling across CPUs.
  //
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table, any address at or above half the supported
  // physical address space is outside the mapped range - fatal.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID set means an instruction fetch caused the fault.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      //
      // In heap-guard non-stop mode the guard violation is recoverable.
      //
      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
    }

    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
    }
  }

  //
  // Recoverable fault: either log it via the SMM profile or extend the
  // on-demand page table.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
717fb604
JY
915\r
/**
  This function sets memory attribute for page table.

  Marks every page-table page (PML4, PDPT, PD, PT) read-only so the page
  table itself cannot be modified. Marking a page may require splitting a
  large page - which allocates new page-table pages - so the walk repeats
  until a full pass causes no further splits. Only meaningful with a static
  page table; returns immediately when heap guard or SMM profile is enabled.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    //
    // Walk every level of the page table, marking each table page RO.
    //
    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G leaf page - no lower-level table to protect.
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M leaf page - no lower-level table to protect.
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return ;
}