]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpu: ReclaimPages: fix incorrect operator binding
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
3eb69b08 4Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
427e3573
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
13#define PAGE_TABLE_PAGES 8\r
14#define ACC_MAX_BIT BIT3\r
241f9149 15\r
427e3573 16LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
427e3573 17BOOLEAN m1GPageTableSupport = FALSE;\r
717fb604 18BOOLEAN mCpuSmmStaticPageTable;\r
4eee0cc7
RN
19BOOLEAN m5LevelPagingSupport;\r
20X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;\r
427e3573 21\r
3eb69b08
JY
22/**\r
23 Disable CET.\r
24**/\r
25VOID\r
26EFIAPI\r
27DisableCet (\r
28 VOID\r
29 );\r
30\r
31/**\r
32 Enable CET.\r
33**/\r
34VOID\r
35EFIAPI\r
36EnableCet (\r
37 VOID\r
38 );\r
39\r
427e3573
MK
40/**\r
41 Check if 1-GByte pages is supported by processor or not.\r
42\r
43 @retval TRUE 1-GByte pages is supported.\r
44 @retval FALSE 1-GByte pages is not supported.\r
45\r
46**/\r
47BOOLEAN\r
48Is1GPageSupport (\r
49 VOID\r
50 )\r
51{\r
52 UINT32 RegEax;\r
53 UINT32 RegEdx;\r
54\r
55 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
56 if (RegEax >= 0x80000001) {\r
57 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
58 if ((RegEdx & BIT26) != 0) {\r
59 return TRUE;\r
60 }\r
61 }\r
62 return FALSE;\r
63}\r
64\r
4eee0cc7
RN
65/**\r
66 Check if 5-level paging is supported by processor or not.\r
67\r
68 @retval TRUE 5-level paging is supported.\r
69 @retval FALSE 5-level paging is not supported.\r
70\r
71**/\r
72BOOLEAN\r
73Is5LevelPagingSupport (\r
74 VOID\r
75 )\r
76{\r
77 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;\r
78\r
79 AsmCpuidEx (\r
80 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,\r
81 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,\r
82 NULL,\r
83 NULL,\r
84 &EcxFlags.Uint32,\r
85 NULL\r
86 );\r
87 return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);\r
88}\r
89\r
427e3573
MK
/**
  Set sub-entries number in entry.

  The count is used by the page-fault handler's page-reclaim logic to know
  when a page-table page becomes empty and can be released (see ReclaimPages).

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there is 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}
110\r
/**
  Return sub-entries number in entry.

  Counterpart of SetSubEntriesNum(): reads the count that the page-reclaim
  logic keeps in the entry's reserved bits.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there is 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
130\r
717fb604
JY
131/**\r
132 Calculate the maximum support address.\r
133\r
134 @return the maximum support address.\r
135**/\r
136UINT8\r
137CalculateMaximumSupportAddress (\r
138 VOID\r
139 )\r
140{\r
141 UINT32 RegEax;\r
142 UINT8 PhysicalAddressBits;\r
143 VOID *Hob;\r
144\r
145 //\r
146 // Get physical address bits supported.\r
147 //\r
148 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
149 if (Hob != NULL) {\r
150 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
151 } else {\r
152 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
153 if (RegEax >= 0x80000008) {\r
154 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
155 PhysicalAddressBits = (UINT8) RegEax;\r
156 } else {\r
157 PhysicalAddressBits = 36;\r
158 }\r
159 }\r
717fb604
JY
160 return PhysicalAddressBits;\r
161}\r
162\r
/**
  Set static page table.

  Builds a full identity-mapped page table (4- or 5-level, with 1-GByte or
  2-MByte leaf pages depending on processor support) covering the whole
  supported physical address space, on top of the already-populated
  below-4GB entries.

  NOTE(review): this function deliberately truncates the module-global
  mPhysicalAddressBits (to 48, then 39) as it descends the levels; callers
  must not rely on its value afterwards.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                            PageAddress;
  UINTN                             NumberOfPml5EntriesNeeded;
  UINTN                             NumberOfPml4EntriesNeeded;
  UINTN                             NumberOfPdpEntriesNeeded;
  UINTN                             IndexOfPml5Entries;
  UINTN                             IndexOfPml4Entries;
  UINTN                             IndexOfPdpEntries;
  UINTN                             IndexOfPageDirectoryEntries;
  UINT64                            *PageMapLevel5Entry;
  UINT64                            *PageMapLevel4Entry;
  UINT64                            *PageMap;
  UINT64                            *PageDirectoryPointerEntry;
  UINT64                            *PageDirectory1GEntry;
  UINT64                            *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  //
  // Each PML5 entry maps 2^48 bytes; one entry suffices unless the address
  // width exceeds 48 bits.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  //
  // Each PML4 entry maps 2^39 bytes.
  //
  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  //
  // Each PDPT entry maps 2^30 bytes.
  //
  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingSupport) {
      //
      // Reuse the PML4 page already referenced by this PML5 entry, if any;
      // otherwise allocate and link a fresh one.
      //
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries (already mapped by Gen4GPageTable)
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries (already mapped by Gen4GPageTable)
            //
            continue;
          }
          //
          // Each Directory Pointer entries points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in a Page Directory Pointer Entries
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries (2-MByte leaf pages)
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
310\r
427e3573
MK
311/**\r
312 Create PageTable for SMM use.\r
313\r
314 @return The address of PML4 (to set CR3).\r
315\r
316**/\r
317UINT32\r
318SmmInitPageTable (\r
319 VOID\r
320 )\r
321{\r
322 EFI_PHYSICAL_ADDRESS Pages;\r
323 UINT64 *PTEntry;\r
324 LIST_ENTRY *FreePage;\r
325 UINTN Index;\r
326 UINTN PageFaultHandlerHookAddress;\r
327 IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
5c88af79 328 EFI_STATUS Status;\r
4eee0cc7
RN
329 UINT64 *Pml4Entry;\r
330 UINT64 *Pml5Entry;\r
427e3573
MK
331\r
332 //\r
333 // Initialize spin lock\r
334 //\r
fe3a75bc 335 InitializeSpinLock (mPFLock);\r
427e3573 336\r
717fb604 337 mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);\r
4eee0cc7
RN
338 m1GPageTableSupport = Is1GPageSupport ();\r
339 m5LevelPagingSupport = Is5LevelPagingSupport ();\r
340 mPhysicalAddressBits = CalculateMaximumSupportAddress ();\r
341 PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);\r
342 DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));\r
343 DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));\r
344 DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));\r
345 DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));\r
427e3573
MK
346 //\r
347 // Generate PAE page table for the first 4GB memory space\r
348 //\r
717fb604 349 Pages = Gen4GPageTable (FALSE);\r
427e3573
MK
350\r
351 //\r
352 // Set IA32_PG_PMNT bit to mask this entry\r
353 //\r
354 PTEntry = (UINT64*)(UINTN)Pages;\r
355 for (Index = 0; Index < 4; Index++) {\r
356 PTEntry[Index] |= IA32_PG_PMNT;\r
357 }\r
358\r
359 //\r
360 // Fill Page-Table-Level4 (PML4) entry\r
361 //\r
4eee0cc7
RN
362 Pml4Entry = (UINT64*)AllocatePageTableMemory (1);\r
363 ASSERT (Pml4Entry != NULL);\r
364 *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
365 ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));\r
717fb604 366\r
427e3573
MK
367 //\r
368 // Set sub-entries number\r
369 //\r
4eee0cc7
RN
370 SetSubEntriesNum (Pml4Entry, 3);\r
371 PTEntry = Pml4Entry;\r
372\r
373 if (m5LevelPagingSupport) {\r
374 //\r
375 // Fill PML5 entry\r
376 //\r
377 Pml5Entry = (UINT64*)AllocatePageTableMemory (1);\r
378 *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
379 ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));\r
380 //\r
381 // Set sub-entries number\r
382 //\r
383 SetSubEntriesNum (Pml5Entry, 1);\r
384 PTEntry = Pml5Entry;\r
385 }\r
427e3573 386\r
717fb604
JY
387 if (mCpuSmmStaticPageTable) {\r
388 SetStaticPageTable ((UINTN)PTEntry);\r
389 } else {\r
390 //\r
391 // Add pages to page pool\r
392 //\r
393 FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);\r
394 ASSERT (FreePage != NULL);\r
395 for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {\r
396 InsertTailList (&mPagePool, FreePage);\r
397 FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);\r
398 }\r
427e3573
MK
399 }\r
400\r
09afd9a4
JW
401 if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||\r
402 HEAP_GUARD_NONSTOP_MODE ||\r
403 NULL_DETECTION_NONSTOP_MODE) {\r
427e3573
MK
404 //\r
405 // Set own Page Fault entry instead of the default one, because SMM Profile\r
406 // feature depends on IRET instruction to do Single Step\r
407 //\r
408 PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;\r
409 IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;\r
410 IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
411 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
412 IdtEntry->Bits.Reserved_0 = 0;\r
413 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
414 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
415 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);\r
416 IdtEntry->Bits.Reserved_1 = 0;\r
417 } else {\r
418 //\r
419 // Register Smm Page Fault Handler\r
420 //\r
5c88af79
JF
421 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);\r
422 ASSERT_EFI_ERROR (Status);\r
427e3573
MK
423 }\r
424\r
425 //\r
426 // Additional SMM IDT initialization for SMM stack guard\r
427 //\r
428 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
429 InitializeIDTSmmStackGuard ();\r
430 }\r
431\r
432 //\r
4eee0cc7 433 // Return the address of PML4/PML5 (to set CR3)\r
427e3573
MK
434 //\r
435 return (UINT32)(UINTN)PTEntry;\r
436}\r
437\r
/**
  Set access record in entry.

  The access record is the age counter used by ReclaimPages() to pick the
  least-recently-used page-table page for eviction.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}
456\r
/**
  Return access record in entry.

  Counterpart of SetAccNum(): reads the age counter that ReclaimPages()
  uses to find the least-recently-used page-table page.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}
475\r
476/**\r
477 Return and update the access record in entry.\r
478\r
479 @param[in, out] Entry Pointer to entry\r
480\r
481 @return Access record value.\r
482\r
483**/\r
484UINT64\r
485GetAndUpdateAccNum (\r
486 IN OUT UINT64 *Entry\r
487 )\r
488{\r
489 UINT64 Acc;\r
490\r
491 Acc = GetAccNum (Entry);\r
492 if ((*Entry & IA32_PG_A) != 0) {\r
493 //\r
494 // If this entry has been accessed, clear access flag in Entry and update access record\r
495 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
496 //\r
497 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
498 SetAccNum (Entry, 0x7);\r
499 return (0x7 + ACC_MAX_BIT);\r
500 } else {\r
501 if (Acc != 0) {\r
502 //\r
503 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
504 //\r
505 SetAccNum (Entry, Acc - 1);\r
506 }\r
507 }\r
508 return Acc;\r
509}\r
510\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       Pml5Entry;
  UINT64                       *Pml5;
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml5Index;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml5;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;
  IA32_CR4                     Cr4;
  BOOLEAN                      Enable5LevelPaging;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a Page Directory;
          // the PML4 entry itself is not a reclaim candidate any more.
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, the PDPT entry is not a
                // reclaim candidate any more.
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a Page Table;
              // track the entry with the smallest access record value.
              //
              PDPTEIgnore = TRUE;
              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
              if (Acc < MinAcc) {
                //
                // If the PD entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = PdtIndex;
                ReleasePageAddress = Pdt + PdtIndex;
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
            // it should only have entries pointing to 2 MByte Pages
            //
            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
            if (Acc < MinAcc) {
              //
              // If the PDPT entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml5 = Pml5Index;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = (UINTN)-1;
              ReleasePageAddress = Pdpt + PdptIndex;
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
        // it should only have entries pointing to 1 GByte Pages
        //
        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
        if (Acc < MinAcc) {
          //
          // If the PML4 entry has the smallest access record value,
          // save the Page address to be released
          //
          MinAcc  = Acc;
          MinPml5 = Pml5Index;
          MinPml4 = Pml4Index;
          MinPdpt = (UINTN)-1;
          MinPdt  = (UINTN)-1;
          ReleasePageAddress = Pml4 + Pml4Index;
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PML4 table has been released before, exit it
    //
    break;
  }
}
757\r
758/**\r
759 Allocate free Page for PageFault handler use.\r
760\r
761 @return Page address.\r
762\r
763**/\r
764UINT64\r
765AllocPage (\r
766 VOID\r
767 )\r
768{\r
769 UINT64 RetVal;\r
770\r
771 if (IsListEmpty (&mPagePool)) {\r
772 //\r
773 // If page pool is empty, reclaim the used pages and insert one into page pool\r
774 //\r
775 ReclaimPages ();\r
776 }\r
777\r
778 //\r
779 // Get one free page and remove it from page pool\r
780 //\r
781 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
782 RemoveEntryList (mPagePool.ForwardLink);\r
783 //\r
784 // Clean this page and return\r
785 //\r
786 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
787 return RetVal;\r
788}\r
789\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) on demand: walks the page table from the
  CR3 root, allocating intermediate page-table pages from the page pool as
  needed, and installs leaf entries of the platform-requested (or default
  2-MByte) page size.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                *PageTable;
  UINT64                *PageTableTop;
  UINT64                PFAddress;
  UINTN                 StartBit;
  UINTN                 EndBit;
  UINT64                PTIndex;
  UINTN                 Index;
  SMM_PAGE_SIZE_TYPE    PageSize;
  UINTN                 NumOfPages;
  UINTN                 PageAttribute;
  EFI_STATUS            Status;
  UINT64                *UpperEntry;
  BOOLEAN               Enable5LevelPaging;
  IA32_CR4              Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize     = SmmPageSize2M;
  NumOfPages   = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize     = SmmPageSize2M;
    NumOfPages   = 1;
    PageAttribute = 0;
  }
  //
  // Clamp out-of-range platform answers to safe defaults.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    //
    // Walk from the top level (PML5 index bits start at 48, PML4 at 39)
    // down to the level that holds the leaf entry for this page size.
    //
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
929\r
/**
  The Page Fault handler wrapper for SMM use.

  Classifies the fault (unsupported address, stack-guard hit, SMM code/data
  protection violation, out-of-SMRAM execution, NULL-pointer access,
  forbidden communication buffer) and either dead-loops, forwards to the
  heap-guard/profile handlers, or maps the page on demand via
  SmiDefaultPFHandler().

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // With a static page table, any fault above half the supported address
  // space cannot be resolved by mapping - halt.
  // (The goto after CpuDeadLoop is unreachable; it keeps the lock-release
  // path uniform for analysis tools.)
  //
  if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID set means the fault was an instruction fetch.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
717fb604
JY
1055\r
1056/**\r
1057 This function sets memory attribute for page table.\r
1058**/\r
1059VOID\r
1060SetPageTableAttributes (\r
1061 VOID\r
1062 )\r
1063{\r
1064 UINTN Index2;\r
1065 UINTN Index3;\r
1066 UINTN Index4;\r
4eee0cc7 1067 UINTN Index5;\r
717fb604
JY
1068 UINT64 *L1PageTable;\r
1069 UINT64 *L2PageTable;\r
1070 UINT64 *L3PageTable;\r
1071 UINT64 *L4PageTable;\r
4eee0cc7 1072 UINT64 *L5PageTable;\r
717fb604
JY
1073 BOOLEAN IsSplitted;\r
1074 BOOLEAN PageTableSplitted;\r
3eb69b08 1075 BOOLEAN CetEnabled;\r
4eee0cc7
RN
1076 IA32_CR4 Cr4;\r
1077 BOOLEAN Enable5LevelPaging;\r
1078\r
1079 Cr4.UintN = AsmReadCr4 ();\r
1080 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);\r
717fb604 1081\r
827330cc
JW
1082 //\r
1083 // Don't do this if\r
1084 // - no static page table; or\r
1015fb3c 1085 // - SMM heap guard feature enabled; or\r
827330cc
JW
1086 // BIT2: SMM page guard enabled\r
1087 // BIT3: SMM pool guard enabled\r
1015fb3c 1088 // - SMM profile feature enabled\r
827330cc
JW
1089 //\r
1090 if (!mCpuSmmStaticPageTable ||\r
1015fb3c
SZ
1091 ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||\r
1092 FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
827330cc 1093 //\r
1015fb3c 1094 // Static paging and heap guard could not be enabled at the same time.\r
827330cc
JW
1095 //\r
1096 ASSERT (!(mCpuSmmStaticPageTable &&\r
1097 (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));\r
1015fb3c
SZ
1098\r
1099 //\r
1100 // Static paging and SMM profile could not be enabled at the same time.\r
1101 //\r
1102 ASSERT (!(mCpuSmmStaticPageTable && FeaturePcdGet (PcdCpuSmmProfileEnable)));\r
717fb604
JY
1103 return ;\r
1104 }\r
1105\r
1106 DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));\r
1107\r
1108 //\r
1109 // Disable write protection, because we need mark page table to be write protected.\r
1110 // We need *write* page table memory, to mark itself to be *read only*.\r
1111 //\r
3eb69b08
JY
1112 CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;\r
1113 if (CetEnabled) {\r
1114 //\r
1115 // CET must be disabled if WP is disabled.\r
1116 //\r
1117 DisableCet();\r
1118 }\r
717fb604
JY
1119 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);\r
1120\r
1121 do {\r
1122 DEBUG ((DEBUG_INFO, "Start...\n"));\r
1123 PageTableSplitted = FALSE;\r
4eee0cc7
RN
1124 L5PageTable = NULL;\r
1125 if (Enable5LevelPaging) {\r
1126 L5PageTable = (UINT64 *)GetPageTableBase ();\r
1127 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
7365eb2c 1128 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
4eee0cc7 1129 }\r
7365eb2c 1130\r
4eee0cc7
RN
1131 for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {\r
1132 if (Enable5LevelPaging) {\r
1133 L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1134 if (L4PageTable == NULL) {\r
4e78c7be
RN
1135 continue;\r
1136 }\r
4eee0cc7
RN
1137 } else {\r
1138 L4PageTable = (UINT64 *)GetPageTableBase ();\r
1139 }\r
1140 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
1141 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1142\r
1143 for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {\r
1144 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1145 if (L3PageTable == NULL) {\r
717fb604
JY
1146 continue;\r
1147 }\r
1148\r
4eee0cc7 1149 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
717fb604
JY
1150 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1151\r
4eee0cc7
RN
1152 for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {\r
1153 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {\r
1154 // 1G\r
717fb604
JY
1155 continue;\r
1156 }\r
4eee0cc7
RN
1157 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1158 if (L2PageTable == NULL) {\r
717fb604
JY
1159 continue;\r
1160 }\r
4eee0cc7
RN
1161\r
1162 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
717fb604 1163 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
4eee0cc7
RN
1164\r
1165 for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {\r
1166 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {\r
1167 // 2M\r
1168 continue;\r
1169 }\r
1170 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1171 if (L1PageTable == NULL) {\r
1172 continue;\r
1173 }\r
1174 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
1175 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1176 }\r
717fb604
JY
1177 }\r
1178 }\r
1179 }\r
1180 } while (PageTableSplitted);\r
1181\r
1182 //\r
1183 // Enable write protection, after page table updated.\r
1184 //\r
1185 AsmWriteCr0 (AsmReadCr0() | CR0_WP);\r
3eb69b08
JY
1186 if (CetEnabled) {\r
1187 //\r
1188 // re-enable CET.\r
1189 //\r
1190 EnableCet();\r
1191 }\r
717fb604
JY
1192\r
1193 return ;\r
1194}\r
37f9fea5
VN
1195\r
1196/**\r
1197 This function reads CR2 register when on-demand paging is enabled.\r
1198\r
1199 @param[out] *Cr2 Pointer to variable to hold CR2 register value.\r
1200**/\r
1201VOID\r
1202SaveCr2 (\r
1203 OUT UINTN *Cr2\r
1204 )\r
1205{\r
1206 if (!mCpuSmmStaticPageTable) {\r
1207 *Cr2 = AsmReadCr2 ();\r
1208 }\r
1209}\r
1210\r
1211/**\r
1212 This function restores CR2 register when on-demand paging is enabled.\r
1213\r
1214 @param[in] Cr2 Value to write into CR2 register.\r
1215**/\r
1216VOID\r
1217RestoreCr2 (\r
1218 IN UINTN Cr2\r
1219 )\r
1220{\r
1221 if (!mCpuSmmStaticPageTable) {\r
1222 AsmWriteCr2 (Cr2);\r
1223 }\r
1224}\r