]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpuDxeSmm: Add support for PCD PcdPteMemoryEncryptionAddressOrMask
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
fe3a75bc 4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
427e3573
MK
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
//
// Number of pages pre-allocated into the free-page pool used by the SMM
// page-fault handler (dynamic, non-static page table mode only).
//
#define PAGE_TABLE_PAGES 8
//
// Added to a just-accessed entry's 3-bit access record (0..7) so a freshly
// used entry always outranks any aged entry in ReclaimPages().
//
#define ACC_MAX_BIT BIT3

// Pool of free 4 KB pages used to build page-table pages on demand.
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
// TRUE when CPUID.80000001h:EDX[26] reports 1-GByte page support.
BOOLEAN m1GPageTableSupport = FALSE;
// Physical address width used to size the static page table (capped at 48).
UINT8 mPhysicalAddressBits;
// Cached PcdCpuSmmStaticPageTable: TRUE = full static page table,
// FALSE = map pages on demand from the page-fault handler.
BOOLEAN mCpuSmmStaticPageTable;
427e3573
MK
26\r
27/**\r
28 Check if 1-GByte pages is supported by processor or not.\r
29\r
30 @retval TRUE 1-GByte pages is supported.\r
31 @retval FALSE 1-GByte pages is not supported.\r
32\r
33**/\r
34BOOLEAN\r
35Is1GPageSupport (\r
36 VOID\r
37 )\r
38{\r
39 UINT32 RegEax;\r
40 UINT32 RegEdx;\r
41\r
42 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
43 if (RegEax >= 0x80000001) {\r
44 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
45 if ((RegEdx & BIT26) != 0) {\r
46 return TRUE;\r
47 }\r
48 }\r
49 return FALSE;\r
50}\r
51\r
52/**\r
53 Set sub-entries number in entry.\r
54\r
55 @param[in, out] Entry Pointer to entry\r
56 @param[in] SubEntryNum Sub-entries number based on 0:\r
57 0 means there is 1 sub-entry under this entry\r
58 0x1ff means there is 512 sub-entries under this entry\r
59\r
60**/\r
61VOID\r
62SetSubEntriesNum (\r
63 IN OUT UINT64 *Entry,\r
64 IN UINT64 SubEntryNum\r
65 )\r
66{\r
67 //\r
68 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
69 //\r
70 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
71}\r
72\r
73/**\r
74 Return sub-entries number in entry.\r
75\r
76 @param[in] Entry Pointer to entry\r
77\r
78 @return Sub-entries number based on 0:\r
79 0 means there is 1 sub-entry under this entry\r
80 0x1ff means there is 512 sub-entries under this entry\r
81**/\r
82UINT64\r
83GetSubEntriesNum (\r
84 IN UINT64 *Entry\r
85 )\r
86{\r
87 //\r
88 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
89 //\r
90 return BitFieldRead64 (*Entry, 52, 60);\r
91}\r
92\r
717fb604
JY
93/**\r
94 Calculate the maximum support address.\r
95\r
96 @return the maximum support address.\r
97**/\r
98UINT8\r
99CalculateMaximumSupportAddress (\r
100 VOID\r
101 )\r
102{\r
103 UINT32 RegEax;\r
104 UINT8 PhysicalAddressBits;\r
105 VOID *Hob;\r
106\r
107 //\r
108 // Get physical address bits supported.\r
109 //\r
110 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
111 if (Hob != NULL) {\r
112 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
113 } else {\r
114 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
115 if (RegEax >= 0x80000008) {\r
116 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
117 PhysicalAddressBits = (UINT8) RegEax;\r
118 } else {\r
119 PhysicalAddressBits = 36;\r
120 }\r
121 }\r
122\r
123 //\r
124 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.\r
125 //\r
126 ASSERT (PhysicalAddressBits <= 52);\r
127 if (PhysicalAddressBits > 48) {\r
128 PhysicalAddressBits = 48;\r
129 }\r
130 return PhysicalAddressBits;\r
131}\r
132\r
133/**\r
134 Set static page table.\r
135\r
136 @param[in] PageTable Address of page table.\r
137**/\r
138VOID\r
139SetStaticPageTable (\r
140 IN UINTN PageTable\r
141 )\r
142{\r
143 UINT64 PageAddress;\r
144 UINTN NumberOfPml4EntriesNeeded;\r
145 UINTN NumberOfPdpEntriesNeeded;\r
146 UINTN IndexOfPml4Entries;\r
147 UINTN IndexOfPdpEntries;\r
148 UINTN IndexOfPageDirectoryEntries;\r
149 UINT64 *PageMapLevel4Entry;\r
150 UINT64 *PageMap;\r
151 UINT64 *PageDirectoryPointerEntry;\r
152 UINT64 *PageDirectory1GEntry;\r
153 UINT64 *PageDirectoryEntry;\r
154\r
155 if (mPhysicalAddressBits <= 39 ) {\r
156 NumberOfPml4EntriesNeeded = 1;\r
157 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));\r
158 } else {\r
159 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));\r
160 NumberOfPdpEntriesNeeded = 512;\r
161 }\r
162\r
163 //\r
164 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
165 //\r
166 PageMap = (VOID *) PageTable;\r
167\r
168 PageMapLevel4Entry = PageMap;\r
169 PageAddress = 0;\r
170 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
171 //\r
172 // Each PML4 entry points to a page of Page Directory Pointer entries.\r
173 //\r
241f9149 174 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);\r
717fb604
JY
175 if (PageDirectoryPointerEntry == NULL) {\r
176 PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
177 ASSERT(PageDirectoryPointerEntry != NULL);\r
178 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));\r
179\r
241f9149 180 *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
181 }\r
182\r
183 if (m1GPageTableSupport) {\r
184 PageDirectory1GEntry = PageDirectoryPointerEntry;\r
185 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
186 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {\r
187 //\r
188 // Skip the < 4G entries\r
189 //\r
190 continue;\r
191 }\r
192 //\r
193 // Fill in the Page Directory entries\r
194 //\r
241f9149 195 *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
196 }\r
197 } else {\r
198 PageAddress = BASE_4GB;\r
199 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
200 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {\r
201 //\r
202 // Skip the < 4G entries\r
203 //\r
204 continue;\r
205 }\r
206 //\r
207 // Each Directory Pointer entries points to a page of Page Directory entires.\r
208 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
209 //\r
241f9149 210 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);\r
717fb604
JY
211 if (PageDirectoryEntry == NULL) {\r
212 PageDirectoryEntry = AllocatePageTableMemory (1);\r
213 ASSERT(PageDirectoryEntry != NULL);\r
214 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));\r
215\r
216 //\r
217 // Fill in a Page Directory Pointer Entries\r
218 //\r
241f9149 219 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
220 }\r
221\r
222 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
223 //\r
224 // Fill in the Page Directory entries\r
225 //\r
241f9149 226 *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
717fb604
JY
227 }\r
228 }\r
229 }\r
230 }\r
231}\r
232\r
427e3573
MK
/**
  Create PageTable for SMM use.

  Builds the SMM CR3: a PML4 whose first entry points at a PAE-style mapping
  of the first 4 GB.  Depending on PcdCpuSmmStaticPageTable, either extends
  it to a full static mapping or seeds a free-page pool for on-demand mapping
  from the page-fault handler.  Also installs the SMM #PF handler.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  //
  // Cache the paging mode PCD and the CPU's 1-GByte page capability, and
  // determine the physical address width to map.
  //
  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));

  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  // (PMNT-masked entries are never evicted by ReclaimPages)
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));

  //
  // Set sub-entries number (0-based: 3 means the 4 PDPT entries of the
  // 4 GB mapping hang under this PML4 entry)
  //
  SetSubEntriesNum (PTEntry, 3);

  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
338\r
339/**\r
340 Set access record in entry.\r
341\r
342 @param[in, out] Entry Pointer to entry\r
343 @param[in] Acc Access record value\r
344\r
345**/\r
346VOID\r
347SetAccNum (\r
348 IN OUT UINT64 *Entry,\r
349 IN UINT64 Acc\r
350 )\r
351{\r
352 //\r
353 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
354 //\r
355 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
356}\r
357\r
358/**\r
359 Return access record in entry.\r
360\r
361 @param[in] Entry Pointer to entry\r
362\r
363 @return Access record value.\r
364\r
365**/\r
366UINT64\r
367GetAccNum (\r
368 IN UINT64 *Entry\r
369 )\r
370{\r
371 //\r
372 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
373 //\r
374 return BitFieldRead64 (*Entry, 9, 11);\r
375}\r
376\r
377/**\r
378 Return and update the access record in entry.\r
379\r
380 @param[in, out] Entry Pointer to entry\r
381\r
382 @return Access record value.\r
383\r
384**/\r
385UINT64\r
386GetAndUpdateAccNum (\r
387 IN OUT UINT64 *Entry\r
388 )\r
389{\r
390 UINT64 Acc;\r
391\r
392 Acc = GetAccNum (Entry);\r
393 if ((*Entry & IA32_PG_A) != 0) {\r
394 //\r
395 // If this entry has been accessed, clear access flag in Entry and update access record\r
396 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
397 //\r
398 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
399 SetAccNum (Entry, 0x7);\r
400 return (0x7 + ACC_MAX_BIT);\r
401 } else {\r
402 if (Acc != 0) {\r
403 //\r
404 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
405 //\r
406 SetAccNum (Entry, Acc - 1);\r
407 }\r
408 }\r
409 return Acc;\r
410}\r
411\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  // (UINT64)-1 / (UINTN)-1 are "not found" sentinels for the minimum search.
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          // (a PML4 entry with a permanently-masked child must never be released)
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not 1-GByte pages entry, it should be a PDPT entry,
        // we will not check PML4 entry more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not 2 MByte page table entry, it should be PD entry
            // we will find the entry has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
          // it should only has the entries point to 2 MByte Pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
      // it should only has the entries point to 1 GByte Pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PLM4 table has been released before, exit it
    //
    break;
  }
}
626\r
627/**\r
628 Allocate free Page for PageFault handler use.\r
629\r
630 @return Page address.\r
631\r
632**/\r
633UINT64\r
634AllocPage (\r
635 VOID\r
636 )\r
637{\r
638 UINT64 RetVal;\r
639\r
640 if (IsListEmpty (&mPagePool)) {\r
641 //\r
642 // If page pool is empty, reclaim the used pages and insert one into page pool\r
643 //\r
644 ReclaimPages ();\r
645 }\r
646\r
647 //\r
648 // Get one free page and remove it from page pool\r
649 //\r
650 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
651 RemoveEntryList (mPagePool.ForwardLink);\r
652 //\r
653 // Clean this page and return\r
654 //\r
655 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
656 return RetVal;\r
657}\r
658\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) on demand: queries the platform for the
  preferred page size/count/attributes, then walks the page table from the
  PML4 down, allocating intermediate page-table pages from the pool as
  needed, and installs leaf entries covering NumOfPages pages.
**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  // Clamp platform-provided values to sane bounds.
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit selects the paging level of the leaf entry: the page-walk loop
  // below stops once StartBit reaches EndBit.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    //
    // Walk down from the PML4 (index bits 39..47) one level (9 bits) per
    // iteration until the leaf level chosen above.
    //
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        // (only pre-existing entries get their sub-entry count bumped below)
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
793\r
/**
  The Page Fault handler wrapper for SMM use.

  Triages the fault: dead-loops on stack-guard hits, SMRAM protection
  violations, execution outside SMRAM, and forbidden communication-buffer
  accesses; otherwise dispatches to the profile handler or the default
  on-demand mapping handler.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table nothing is mapped on demand, so an address at or
  // above this limit cannot be satisfied.
  // NOTE(review): the limit is 1 << (mPhysicalAddressBits - 1), i.e. half the
  // supported physical space — confirm this threshold is intentional.
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    CpuIndex = GetCpuIndex ();
    // The guard page sits one page above each CPU's stack base.
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // Decode the #PF error code bits for the log:
      // I=instruction fetch, R=reserved-bit, U=user, W=write, P=present.
      //
      DEBUG ((DEBUG_ERROR, "SMM exception data - 0x%lx(", SystemContext.SystemContextX64->ExceptionData));
      DEBUG ((DEBUG_ERROR, "I:%x, R:%x, U:%x, W:%x, P:%x",
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_RSVD) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_US) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_WR) != 0,
        (SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_P) != 0
        ));
      DEBUG ((DEBUG_ERROR, ")\n"));
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }
    }
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs outside SMRAM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}
717fb604
JY
893\r
/**
  This function sets memory attribute for page table.

  Marks every page-table page of the static SMM page table read-only.
  Marking a page may require splitting a large page, which creates new
  page-table pages; the do/while loop repeats the full walk until a pass
  completes with no further splits.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;

  // Only the static page table is protected; the dynamic one is rewritten
  // at runtime by the page-fault handler.
  if (!mCpuSmmStaticPageTable) {
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need mark page table to be write protected.
  // We need *write* page table memory, to mark itself to be *read only*.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;

    L4PageTable = (UINT64 *)GetPageTableBase ();
    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
    PageTableSplitted = (PageTableSplitted || IsSplitted);

    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L3PageTable == NULL) {
        continue;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
          // 1G leaf page: no lower-level table to protect
          continue;
        }
        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L2PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
            // 2M leaf page: no lower-level table to protect
            continue;
          }
          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L1PageTable == NULL) {
            continue;
          }
          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);

  return ;
}