]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
1c75bf3c 4Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
427e3573
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
053e878b
MK
13#define PAGE_TABLE_PAGES 8\r
14#define ACC_MAX_BIT BIT3\r
241f9149 15\r
053e878b 16extern UINTN mSmmShadowStackSize;\r
ef91b073 17\r
053e878b
MK
18LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
19BOOLEAN m1GPageTableSupport = FALSE;\r
20BOOLEAN mCpuSmmRestrictedMemoryAccess;\r
053e878b 21X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;\r
427e3573
MK
22\r
23/**\r
24 Check if 1-GByte pages is supported by processor or not.\r
25\r
26 @retval TRUE 1-GByte pages is supported.\r
27 @retval FALSE 1-GByte pages is not supported.\r
28\r
29**/\r
30BOOLEAN\r
31Is1GPageSupport (\r
32 VOID\r
33 )\r
34{\r
053e878b
MK
35 UINT32 RegEax;\r
36 UINT32 RegEdx;\r
427e3573
MK
37\r
38 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
39 if (RegEax >= 0x80000001) {\r
40 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
41 if ((RegEdx & BIT26) != 0) {\r
42 return TRUE;\r
43 }\r
44 }\r
053e878b 45\r
427e3573
MK
46 return FALSE;\r
47}\r
48\r
4eee0cc7 49/**\r
86ad762f
RN
50 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and\r
51 the max physical address bits is bigger than 48. Because 4-level paging can support\r
52 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging\r
53 with max physical address bits <= 48.\r
4eee0cc7 54\r
86ad762f
RN
55 @retval TRUE 5-level paging enabling is needed.\r
56 @retval FALSE 5-level paging enabling is not needed.\r
4eee0cc7
RN
57**/\r
58BOOLEAN\r
86ad762f 59Is5LevelPagingNeeded (\r
4eee0cc7
RN
60 VOID\r
61 )\r
62{\r
053e878b
MK
63 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;\r
64 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;\r
65 UINT32 MaxExtendedFunctionId;\r
4eee0cc7 66\r
86ad762f
RN
67 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);\r
68 if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
69 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);\r
70 } else {\r
71 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;\r
72 }\r
053e878b 73\r
4eee0cc7
RN
74 AsmCpuidEx (\r
75 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,\r
76 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,\r
053e878b
MK
77 NULL,\r
78 NULL,\r
79 &ExtFeatureEcx.Uint32,\r
80 NULL\r
4eee0cc7 81 );\r
86ad762f 82 DEBUG ((\r
053e878b
MK
83 DEBUG_INFO,\r
84 "PhysicalAddressBits = %d, 5LPageTable = %d.\n",\r
85 VirPhyAddressSize.Bits.PhysicalAddressBits,\r
86 ExtFeatureEcx.Bits.FiveLevelPage\r
86ad762f
RN
87 ));\r
88\r
1c75bf3c
RG
89 if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&\r
90 (ExtFeatureEcx.Bits.FiveLevelPage == 1))\r
91 {\r
86ad762f
RN
92 return TRUE;\r
93 } else {\r
94 return FALSE;\r
95 }\r
4eee0cc7
RN
96}\r
97\r
427e3573
MK
98/**\r
99 Set sub-entries number in entry.\r
100\r
101 @param[in, out] Entry Pointer to entry\r
102 @param[in] SubEntryNum Sub-entries number based on 0:\r
103 0 means there is 1 sub-entry under this entry\r
104 0x1ff means there is 512 sub-entries under this entry\r
105\r
106**/\r
107VOID\r
108SetSubEntriesNum (\r
053e878b
MK
109 IN OUT UINT64 *Entry,\r
110 IN UINT64 SubEntryNum\r
427e3573
MK
111 )\r
112{\r
113 //\r
114 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
115 //\r
116 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
117}\r
118\r
119/**\r
120 Return sub-entries number in entry.\r
121\r
122 @param[in] Entry Pointer to entry\r
123\r
124 @return Sub-entries number based on 0:\r
125 0 means there is 1 sub-entry under this entry\r
126 0x1ff means there is 512 sub-entries under this entry\r
127**/\r
128UINT64\r
129GetSubEntriesNum (\r
053e878b 130 IN UINT64 *Entry\r
427e3573
MK
131 )\r
132{\r
133 //\r
134 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
135 //\r
136 return BitFieldRead64 (*Entry, 52, 60);\r
137}\r
138\r
717fb604
JY
139/**\r
140 Calculate the maximum support address.\r
141\r
142 @return the maximum support address.\r
143**/\r
144UINT8\r
145CalculateMaximumSupportAddress (\r
146 VOID\r
147 )\r
148{\r
053e878b
MK
149 UINT32 RegEax;\r
150 UINT8 PhysicalAddressBits;\r
151 VOID *Hob;\r
717fb604
JY
152\r
153 //\r
154 // Get physical address bits supported.\r
155 //\r
156 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
157 if (Hob != NULL) {\r
053e878b 158 PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;\r
717fb604
JY
159 } else {\r
160 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
161 if (RegEax >= 0x80000008) {\r
162 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
053e878b 163 PhysicalAddressBits = (UINT8)RegEax;\r
717fb604
JY
164 } else {\r
165 PhysicalAddressBits = 36;\r
166 }\r
167 }\r
053e878b 168\r
717fb604
JY
169 return PhysicalAddressBits;\r
170}\r
171\r
/**
  Set static page table.

  Builds, under the page-table root at PageTable, identity-mapped entries that
  cover the whole physical address space above 4 GB (the low 4 GB entries are
  pre-populated elsewhere and deliberately skipped here). Uses 1-GByte leaf
  pages when the CPU supports them, otherwise 2-MByte leaf pages.

  @param[in] PageTable              Address of page table.
  @param[in] PhysicalAddressBits    The maximum physical address bits supported.
**/
VOID
SetStaticPageTable (
  IN UINTN  PageTable,
  IN UINT8  PhysicalAddressBits
  )
{
  UINT64  PageAddress;
  UINTN   NumberOfPml5EntriesNeeded;
  UINTN   NumberOfPml4EntriesNeeded;
  UINTN   NumberOfPdpEntriesNeeded;
  UINTN   IndexOfPml5Entries;
  UINTN   IndexOfPml4Entries;
  UINTN   IndexOfPdpEntries;
  UINTN   IndexOfPageDirectoryEntries;
  UINT64  *PageMapLevel5Entry;
  UINT64  *PageMapLevel4Entry;
  UINT64  *PageMap;
  UINT64  *PageDirectoryPointerEntry;
  UINT64  *PageDirectory1GEntry;
  UINT64  *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  //
  // Compute how many entries are needed at each level; each level consumes
  // 9 address bits (512 entries per 4K table page).
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *)PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
  {
    //
    // Each PML5 entry points to a page of PML4 entires.
    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, below allocation happens only once.
    //
    if (m5LevelPagingNeeded) {
      // Reuse an already-populated PML4 page if the PML5 entry is non-zero;
      // allocate and link a fresh one otherwise.
      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT (PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    // When more than one PML5 entry is needed, every PML4 page is full (512).
    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT (PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        //
        // Map with 1-GByte leaf pages directly in the PDPT.
        //
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        //
        // No 1G page support: map with 2-MByte leaf pages, starting above 4 GB.
        //
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
            //
            // Skip the < 4G entries
            //
            continue;
          }

          //
          // Each Directory Pointer entries points to a page of Page Directory entires.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT (PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));

            //
            // Fill in a Page Directory Pointer Entries
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}
325\r
427e3573
MK
/**
  Create PageTable for SMM use.

  Detects paging capabilities (1G pages, 5-level paging, address width),
  builds the SMM page-table hierarchy, installs the SMM page-fault handler,
  and performs the extra IDT IST setup needed for stack guard / CET.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS      Pages;
  UINT64                    *PTEntry;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  //
  // Probe CPU capabilities once and cache them in module globals used by the
  // page-fault handler and SetStaticPageTable().
  //
  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64 *)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
  } else {
    //
    // Add pages to page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
468\r
469/**\r
470 Set access record in entry.\r
471\r
472 @param[in, out] Entry Pointer to entry\r
473 @param[in] Acc Access record value\r
474\r
475**/\r
476VOID\r
477SetAccNum (\r
053e878b
MK
478 IN OUT UINT64 *Entry,\r
479 IN UINT64 Acc\r
427e3573
MK
480 )\r
481{\r
482 //\r
483 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
484 //\r
485 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
486}\r
487\r
488/**\r
489 Return access record in entry.\r
490\r
491 @param[in] Entry Pointer to entry\r
492\r
493 @return Access record value.\r
494\r
495**/\r
496UINT64\r
497GetAccNum (\r
053e878b 498 IN UINT64 *Entry\r
427e3573
MK
499 )\r
500{\r
501 //\r
502 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
503 //\r
504 return BitFieldRead64 (*Entry, 9, 11);\r
505}\r
506\r
507/**\r
508 Return and update the access record in entry.\r
509\r
510 @param[in, out] Entry Pointer to entry\r
511\r
512 @return Access record value.\r
513\r
514**/\r
515UINT64\r
516GetAndUpdateAccNum (\r
053e878b 517 IN OUT UINT64 *Entry\r
427e3573
MK
518 )\r
519{\r
053e878b 520 UINT64 Acc;\r
427e3573
MK
521\r
522 Acc = GetAccNum (Entry);\r
523 if ((*Entry & IA32_PG_A) != 0) {\r
524 //\r
525 // If this entry has been accessed, clear access flag in Entry and update access record\r
526 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
527 //\r
528 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
529 SetAccNum (Entry, 0x7);\r
530 return (0x7 + ACC_MAX_BIT);\r
531 } else {\r
532 if (Acc != 0) {\r
533 //\r
534 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
535 //\r
536 SetAccNum (Entry, Acc - 1);\r
537 }\r
538 }\r
053e878b 539\r
427e3573
MK
540 return Acc;\r
541}\r
542\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  //
  // Decompose the faulting address (CR2) into per-level table indexes so the
  // tables on the current fault's translation path are never reclaimed.
  //
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not 1-GByte pages entry, it should be a PDPT entry,
          // we will not check PML4 entry more
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check PDPT entry more
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not 2 MByte page table entry, it should be PD entry
              // we will find the entry has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              // Never pick an entry on the current fault's translation path.
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
            // it should only has the entries point to 2 MByte Pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
        // it should only has the entries point to 1 GByte Pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // PML4 table has been released before, exit it
    //
    break;
  }
}
824\r
825/**\r
826 Allocate free Page for PageFault handler use.\r
827\r
828 @return Page address.\r
829\r
830**/\r
831UINT64\r
832AllocPage (\r
833 VOID\r
834 )\r
835{\r
053e878b 836 UINT64 RetVal;\r
427e3573
MK
837\r
838 if (IsListEmpty (&mPagePool)) {\r
839 //\r
840 // If page pool is empty, reclaim the used pages and insert one into page pool\r
841 //\r
842 ReclaimPages ();\r
843 }\r
844\r
845 //\r
846 // Get one free page and remove it from page pool\r
847 //\r
848 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
849 RemoveEntryList (mPagePool.ForwardLink);\r
850 //\r
851 // Clean this page and return\r
852 //\r
053e878b 853 ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);\r
427e3573
MK
854 return RetVal;\r
855}\r
856\r
/**
  Page Fault handler for SMM use.

  Demand-creates page-table entries that map the faulting address (CR2),
  using the page size and attributes requested by the platform hook
  GetPlatformPageTableAttribute() or the 2-MByte default.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  // Clamp out-of-range platform answers to safe defaults.
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit selects the paging level of the leaf entry to create:
  // it is the lowest address bit translated by that leaf.
  //
  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    //
    // Walk from the top level (bit 48 with 5-level paging, bit 39 otherwise)
    // down to the level just above the leaf, creating missing tables on the way.
    //
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
1002\r
/**
  The Page Fault (#PF) handler wrapper for SMM use.

  Classifies the faulting address (from CR2) and either dead-loops on a fatal
  condition (stack/shadow-stack overflow, execution outside SMRAM, NULL-pointer
  access, forbidden communication buffer) or forwards the fault to the SMM
  profile handler / default on-demand paging handler.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Serialize #PF handling across CPUs; released at Exit on every path.
  //
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // In restricted-memory-access mode, an address at or above
  // 1 << (mPhysicalAddressBits - 1) is beyond what the page table maps: fatal.
  //
  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    //
    // Each CPU owns a (stack + shadow stack) region of size
    // mSmmStackSize + mSmmShadowStackSize; the guard page sits one page into
    // each sub-region.
    //
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID (instruction-fetch bit in the error code) distinguishes
      // an execution fault from a data-access fault.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          // For an execution fault, the return address on the stack identifies the caller.
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      //
      // Non-stop heap-guard mode: record/bypass the fault and continue instead
      // of halting.
      //
      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    //
    // Instruction fetch outside SMRAM while in SMM is always fatal.
    //
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      //
      // Non-stop NULL-detection mode: log and resume rather than halt.
      //
      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    //
    // Addresses the platform marked forbidden for SMM communication buffers
    // must never be touched from SMM.
    //
    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  //
  // Benign fault: either feed it to the SMM profile logger or satisfy it via
  // on-demand page-table creation.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
717fb604 1141\r
37f9fea5
VN
1142/**\r
1143 This function reads CR2 register when on-demand paging is enabled.\r
1144\r
1145 @param[out] *Cr2 Pointer to variable to hold CR2 register value.\r
1146**/\r
1147VOID\r
1148SaveCr2 (\r
1149 OUT UINTN *Cr2\r
1150 )\r
1151{\r
09f7c82b
RN
1152 if (!mCpuSmmRestrictedMemoryAccess) {\r
1153 //\r
1154 // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
1155 //\r
37f9fea5
VN
1156 *Cr2 = AsmReadCr2 ();\r
1157 }\r
1158}\r
1159\r
1160/**\r
1161 This function restores CR2 register when on-demand paging is enabled.\r
1162\r
1163 @param[in] Cr2 Value to write into CR2 register.\r
1164**/\r
1165VOID\r
1166RestoreCr2 (\r
1167 IN UINTN Cr2\r
1168 )\r
1169{\r
09f7c82b
RN
1170 if (!mCpuSmmRestrictedMemoryAccess) {\r
1171 //\r
1172 // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
1173 //\r
37f9fea5
VN
1174 AsmWriteCr2 (Cr2);\r
1175 }\r
1176}\r
79186ddc
RN
1177\r
1178/**\r
1179 Return whether access to non-SMRAM is restricted.\r
1180\r
1181 @retval TRUE Access to non-SMRAM is restricted.\r
1182 @retval FALSE Access to non-SMRAM is not restricted.\r
9c33f16f 1183**/\r
79186ddc
RN
1184BOOLEAN\r
1185IsRestrictedMemoryAccess (\r
1186 VOID\r
1187 )\r
1188{\r
1189 return mCpuSmmRestrictedMemoryAccess;\r
1190}\r