/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

extern UINTN  mSmmShadowStackSize;

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
BOOLEAN                   m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check whether 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16] is set)
  and the max physical address bits is bigger than 48. Because 4-level paging can address
  physical addresses up to 2^48 - 1, there is no need to enable 5-level paging
  with max physical address bits <= 48.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

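//
// Illustrative note: with 4-KByte leaf pages each paging level resolves 9 linear-address
// bits and the page offset takes 12, so 4-level paging spans 4 * 9 + 12 = 48 bits and
// 5-level paging spans 5 * 9 + 12 = 57 bits. The check above therefore asks whether the
// reported physical address width exceeds what a 48-bit linear mapping can reach.
//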
/**
  Get page table base address and the depth of the page table.

  @param[out] Base        Page table base address.
  @param[out] FiveLevels  TRUE means 5-level paging. FALSE means 4-level paging.
**/
VOID
GetPageTable (
  OUT UINTN    *Base,
  OUT BOOLEAN  *FiveLevels OPTIONAL
  )
{
  IA32_CR4  Cr4;

  if (mInternalCr3 == 0) {
    *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    if (FiveLevels != NULL) {
      Cr4.UintN   = AsmReadCr4 ();
      *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
    }

    return;
  }

  *Base = mInternalCr3;
  if (FiveLevels != NULL) {
    *FiveLevels = m5LevelPagingNeeded;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

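//
// Illustrative sketch: the sub-entry counter lives in the ignored bits 52-60 of a
// paging-structure entry, so tracking how many children a table still has costs no
// extra memory. When a new child table is hooked under a parent entry, the counter is
// bumped exactly as SmiDefaultPFHandler does later in this file:
//
//   SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
//
// The "& 0x1FF" keeps the value inside the 9-bit field (0 stands for 1 sub-entry,
// 0x1FF for 512), and ReclaimPages decrements the same counter when a child is released.
//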
/**
  Calculate the maximum supported address.

  @return the maximum supported address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable              Address of page table.
  @param[in] PhysicalAddressBits    The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

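//
// Illustrative worked example: for a processor reporting 46 physical address bits with
// 5-level paging not needed, the counts above work out as
//
//   NumberOfPml5EntriesNeeded = 1                        (46 <= 48)
//   NumberOfPml4EntriesNeeded = 1 << (46 - 39) = 128     PML4 entries
//   NumberOfPdpEntriesNeeded  = 1 << (39 - 30) = 512     PDPT entries per PML4 entry
//
// i.e. 128 * 512 GByte = 64 TByte (2^46 bytes) of address space is mapped statically,
// using 1-GByte or 2-MByte leaf pages depending on m1GPageTableSupport.
//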
/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset the access
    // record to the initial value 7; ACC_MAX_BIT is added to the return value to rank it
    // above entries that were not accessed
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and update the field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}

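//
// Illustrative walkthrough: the three reserved bits implement a small aging counter for a
// pseudo-LRU policy. An entry that was just accessed reports 7 + ACC_MAX_BIT (i.e. 15 with
// ACC_MAX_BIT == BIT3), so it is never picked as the reclaim victim in that pass; an idle
// entry decays 7, 6, 5, ... towards 0 on each ReclaimPages() scan, and the entry left with
// the smallest returned value becomes the page to release.
//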
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper-level entries need to be inserted
  into the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, it should be a PDPT entry;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointing to 4-KByte page tables,
            // it should only have entries that point to 2-MByte pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries that point to 1-GByte pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries need to be inserted into the page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table is released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if there is no 4-KByte Page Table entry left,
        // and clear the Page Directory Pointer entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // One 2-MByte Page Table is released or a Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty PML4 table if there is no 1-GByte page entry left,
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // The PML4 table has been released before; exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

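//
// Illustrative sketch: the page pool is a plain doubly-linked list whose LIST_ENTRY node
// lives at the start of each free 4-KByte page itself, so no separate bookkeeping memory
// is needed. Returning a page to the pool therefore looks like
//
//   InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)PageAddress);
//
// and AllocPage() above simply pops the head of the list, zeroes the page, and hands it
// out as the next paging-structure page.
//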
/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of the entry are used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries of
      // different sizes are created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

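//
// Illustrative worked example: with 4-level paging and a 2-MByte mapping (EndBit = 21),
// a faulting address of 0x0000008080600000 is decomposed by the walk above as
//
//   PML4 index = BitFieldRead64 (PFAddress, 39, 47) = 1
//   PDPT index = BitFieldRead64 (PFAddress, 30, 38) = 2
//   PD   index = BitFieldRead64 (PFAddress, 21, 29) = 3
//
// The loop handles bits 39 and 30, and the final read at StartBit == EndBit == 21 selects
// the leaf PD entry that maps the 2-MByte region containing the fault address.
//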
/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow stack guard page,
  // or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

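//
// Illustrative layout note, mirroring only the expressions used in SmiPFHandler above:
// each CPU owns one slot of (mSmmStackSize + mSmmShadowStackSize) bytes relative to
// mSmmStackArrayBase, and the two guard pages checked for CPU N sit at
//
//   GuardPageAddress            = mSmmStackArrayBase + EFI_PAGE_SIZE
//                                 + N * (mSmmStackSize + mSmmShadowStackSize)
//   ShadowStackGuardPageAddress = mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE
//                                 + N * (mSmmStackSize + mSmmShadowStackSize)
//
// A fault landing inside either 4-KByte page is reported as a stack or shadow-stack
// overflow for that CPU.
//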
/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN     Index2;
  UINTN     Index3;
  UINTN     Index4;
  UINTN     Index5;
  UINT64    *L1PageTable;
  UINT64    *L2PageTable;
  UINT64    *L3PageTable;
  UINT64    *L4PageTable;
  UINT64    *L5PageTable;
  UINTN     PageTableBase;
  BOOLEAN   IsSplitted;
  BOOLEAN   PageTableSplitted;
  BOOLEAN   CetEnabled;
  BOOLEAN   Enable5LevelPaging;

  //
  // Don't mark page table memory as read-only if
  //  - no restriction on access to non-SMRAM memory; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable))
  {
    //
    // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
    //
    ASSERT (
      !(mCpuSmmRestrictedMemoryAccess &&
        (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0)
      );

    //
    // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as write protected:
  // we must *write* page table memory in order to mark it *read only*.
  //
  CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet ();
  }

  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable       = NULL;

    GetPageTable (&PageTableBase, &Enable5LevelPaging);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof (UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }

      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }

          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }

            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }

            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection after the page table is updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet ();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] *Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE  Access to non-SMRAM is restricted.
  @retval FALSE Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}