UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c

/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmStaticPageTable;
BOOLEAN                   m5LevelPagingSupport;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingSupport;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Check if 5-level paging is supported by the processor or not.

  @retval TRUE   5-level paging is supported.
  @retval FALSE  5-level paging is not supported.

**/
BOOLEAN
Is5LevelPagingSupport (
  VOID
  )
{
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &EcxFlags.Uint32,
    NULL
    );
  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

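//
// Illustrative sketch (not part of the driver flow): the sub-entry count
// round-trips through the reserved bits of a page table entry. Assuming a
// local UINT64 variable named Entry:
//
//   UINT64  Entry;
//
//   Entry = 0;
//   SetSubEntriesNum (&Entry, 3);             // record 4 sub-entries (count is 0-based)
//   ASSERT (GetSubEntriesNum (&Entry) == 3);  // value is kept in BIT52..BIT60
//
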
/**
  Calculate the maximum supported address.

  @return the maximum supported address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable  Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

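  //
  // Worked example (for illustration only): with mPhysicalAddressBits == 48 and
  // 5-level paging disabled, NumberOfPml5EntriesNeeded stays 1,
  // NumberOfPml4EntriesNeeded becomes 2^(48-39) == 512, and
  // NumberOfPdpEntriesNeeded becomes 2^(39-30) == 512, i.e. a fully populated
  // 4-level static page table.
  //
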
  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingSupport) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in a Page Directory Pointer Entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
  m1GPageTableSupport = Is1GPageSupport ();
  m5LevelPagingSupport = Is5LevelPagingSupport ();
  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingSupport) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

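  //
  // At this point, PTEntry references the top-level paging structure: the PML4
  // table, or the PML5 table when 5-level paging is enabled. Only the first
  // 4GB (built by Gen4GPageTable above) is mapped so far; in the on-demand
  // mode below, the remaining address space is mapped lazily by the page fault
  // handler using pages from the page pool.
  //
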
  if (mCpuSmmStaticPageTable) {
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and update the access record
    // to the initial value 7; adding ACC_MAX_BIT makes the return value larger than the others
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

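//
// Note on the eviction scoring used by ReclaimPages() below: an entry whose
// Accessed (A) bit is set always returns 0x7 + ACC_MAX_BIT (i.e. 15), which is
// larger than any value a not-recently-accessed entry can return (at most 7),
// so recently used page table pages are never the first reclaim candidates.
// Entries that were not accessed have their record aged down by one on each
// pass.
//
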
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries also need to be inserted
  into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, it should be a PDPT entry;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page table entry, it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
              if (Acc < MinAcc) {
                //
                // If the PD entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt = PdtIndex;
                ReleasePageAddress = Pdt + PdtIndex;
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
            if (Acc < MinAcc) {
              //
              // If the PDPT entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc = Acc;
              MinPml5 = Pml5Index;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = (UINTN)-1;
              ReleasePageAddress = Pdpt + PdptIndex;
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
        if (Acc < MinAcc) {
          //
          // If the PML4 entry has the smallest access record value,
          // save the Page address to be released
          //
          MinAcc = Acc;
          MinPml5 = Pml5Index;
          MinPml4 = Pml4Index;
          MinPdpt = (UINTN)-1;
          MinPdt = (UINTN)-1;
          ReleasePageAddress = Pml4 + Pml4Index;
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries also need to be inserted
  // into the page pool
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there is no 4-KByte Page Table
        // entry left, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory Pointer table if there is no 1-GByte
        // Page Table entry left, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

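  //
  // Note: with 4-level paging the walk below starts at StartBit 39 (PML4 index,
  // bits 47:39) and steps down 9 bits per level; with 5-level paging it starts
  // at bit 48 (PML5 index). For a 2-MByte mapping (EndBit == 21) the loop stops
  // after the PDPT level, and the final entry is written into the Page Directory
  // indexed by bits 29:21 of the fault address, which is aligned down via
  // ~((1ull << EndBit) - 1).
  //
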
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when
      // different-size page entries are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  //
  // Don't do this if
  //  - no static page table; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmStaticPageTable ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Static paging and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Static paging and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return ;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table to be write protected.
  // We need to *write* page table memory, to mark itself to be *read only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;
    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)GetPageTableBase ();
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)GetPageTableBase ();
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet ();
  }

  return ;
}

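//
// SaveCr2/RestoreCr2 below only touch CR2 when static paging is disabled.
// A plausible reading of this (the code itself does not state it explicitly):
// only the on-demand paging mode takes page faults inside SMM, where the
// handler consumes CR2, so the interrupted context's CR2 must be preserved
// across the SMI; with a static page table that work can be skipped.
//
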
/**
  This function reads CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmStaticPageTable) {
    AsmWriteCr2 (Cr2);
  }
}