/* UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c */
/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

11#include "PiSmmCpuDxeSmm.h"\r
12\r
13#define PAGE_TABLE_PAGES 8\r
14#define ACC_MAX_BIT BIT3\r
241f9149 15\r
ef91b073
SW
16extern UINTN mSmmShadowStackSize;\r
17\r
427e3573 18LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
427e3573 19BOOLEAN m1GPageTableSupport = FALSE;\r
09f7c82b 20BOOLEAN mCpuSmmRestrictedMemoryAccess;\r
86ad762f
RN
21BOOLEAN m5LevelPagingNeeded;\r
22X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;\r
427e3573 23\r
3eb69b08
JY
24/**\r
25 Disable CET.\r
26**/\r
27VOID\r
28EFIAPI\r
29DisableCet (\r
30 VOID\r
31 );\r
32\r
33/**\r
34 Enable CET.\r
35**/\r
36VOID\r
37EFIAPI\r
38EnableCet (\r
39 VOID\r
40 );\r
41\r
427e3573
MK
42/**\r
43 Check if 1-GByte pages is supported by processor or not.\r
44\r
45 @retval TRUE 1-GByte pages is supported.\r
46 @retval FALSE 1-GByte pages is not supported.\r
47\r
48**/\r
49BOOLEAN\r
50Is1GPageSupport (\r
51 VOID\r
52 )\r
53{\r
54 UINT32 RegEax;\r
55 UINT32 RegEdx;\r
56\r
57 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
58 if (RegEax >= 0x80000001) {\r
59 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
60 if ((RegEdx & BIT26) != 0) {\r
61 return TRUE;\r
62 }\r
63 }\r
64 return FALSE;\r
65}\r
66\r
4eee0cc7 67/**\r
86ad762f
RN
68 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and\r
69 the max physical address bits is bigger than 48. Because 4-level paging can support\r
70 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging\r
71 with max physical address bits <= 48.\r
4eee0cc7 72\r
86ad762f
RN
73 @retval TRUE 5-level paging enabling is needed.\r
74 @retval FALSE 5-level paging enabling is not needed.\r
4eee0cc7
RN
75**/\r
76BOOLEAN\r
86ad762f 77Is5LevelPagingNeeded (\r
4eee0cc7
RN
78 VOID\r
79 )\r
80{\r
86ad762f
RN
81 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;\r
82 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;\r
83 UINT32 MaxExtendedFunctionId;\r
4eee0cc7 84\r
86ad762f
RN
85 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);\r
86 if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
87 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);\r
88 } else {\r
89 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;\r
90 }\r
4eee0cc7
RN
91 AsmCpuidEx (\r
92 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,\r
93 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,\r
86ad762f 94 NULL, NULL, &ExtFeatureEcx.Uint32, NULL\r
4eee0cc7 95 );\r
86ad762f
RN
96 DEBUG ((\r
97 DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",\r
98 VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage\r
99 ));\r
100\r
101 if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {\r
102 ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);\r
103 return TRUE;\r
104 } else {\r
105 return FALSE;\r
106 }\r
4eee0cc7
RN
107}\r
108\r
404250c8
SW
109/**\r
110 Get page table base address and the depth of the page table.\r
111\r
112 @param[out] Base Page table base address.\r
113 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.\r
114**/\r
115VOID\r
116GetPageTable (\r
117 OUT UINTN *Base,\r
118 OUT BOOLEAN *FiveLevels OPTIONAL\r
119 )\r
120{\r
121 IA32_CR4 Cr4;\r
122\r
123 if (mInternalCr3 == 0) {\r
124 *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;\r
125 if (FiveLevels != NULL) {\r
126 Cr4.UintN = AsmReadCr4 ();\r
127 *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);\r
128 }\r
129 return;\r
130 }\r
131\r
132 *Base = mInternalCr3;\r
133 if (FiveLevels != NULL) {\r
134 *FiveLevels = m5LevelPagingNeeded;\r
135 }\r
136}\r
137\r
427e3573
MK
138/**\r
139 Set sub-entries number in entry.\r
140\r
141 @param[in, out] Entry Pointer to entry\r
142 @param[in] SubEntryNum Sub-entries number based on 0:\r
143 0 means there is 1 sub-entry under this entry\r
144 0x1ff means there is 512 sub-entries under this entry\r
145\r
146**/\r
147VOID\r
148SetSubEntriesNum (\r
149 IN OUT UINT64 *Entry,\r
150 IN UINT64 SubEntryNum\r
151 )\r
152{\r
153 //\r
154 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
155 //\r
156 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
157}\r
158\r
159/**\r
160 Return sub-entries number in entry.\r
161\r
162 @param[in] Entry Pointer to entry\r
163\r
164 @return Sub-entries number based on 0:\r
165 0 means there is 1 sub-entry under this entry\r
166 0x1ff means there is 512 sub-entries under this entry\r
167**/\r
168UINT64\r
169GetSubEntriesNum (\r
170 IN UINT64 *Entry\r
171 )\r
172{\r
173 //\r
174 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
175 //\r
176 return BitFieldRead64 (*Entry, 52, 60);\r
177}\r
178\r
717fb604
JY
179/**\r
180 Calculate the maximum support address.\r
181\r
182 @return the maximum support address.\r
183**/\r
184UINT8\r
185CalculateMaximumSupportAddress (\r
186 VOID\r
187 )\r
188{\r
189 UINT32 RegEax;\r
190 UINT8 PhysicalAddressBits;\r
191 VOID *Hob;\r
192\r
193 //\r
194 // Get physical address bits supported.\r
195 //\r
196 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);\r
197 if (Hob != NULL) {\r
198 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;\r
199 } else {\r
200 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
201 if (RegEax >= 0x80000008) {\r
202 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);\r
203 PhysicalAddressBits = (UINT8) RegEax;\r
204 } else {\r
205 PhysicalAddressBits = 36;\r
206 }\r
207 }\r
717fb604
JY
208 return PhysicalAddressBits;\r
209}\r
210\r
211/**\r
212 Set static page table.\r
213\r
214 @param[in] PageTable Address of page table.\r
215**/\r
216VOID\r
217SetStaticPageTable (\r
218 IN UINTN PageTable\r
219 )\r
220{\r
221 UINT64 PageAddress;\r
4eee0cc7 222 UINTN NumberOfPml5EntriesNeeded;\r
717fb604
JY
223 UINTN NumberOfPml4EntriesNeeded;\r
224 UINTN NumberOfPdpEntriesNeeded;\r
4eee0cc7 225 UINTN IndexOfPml5Entries;\r
717fb604
JY
226 UINTN IndexOfPml4Entries;\r
227 UINTN IndexOfPdpEntries;\r
228 UINTN IndexOfPageDirectoryEntries;\r
4eee0cc7 229 UINT64 *PageMapLevel5Entry;\r
717fb604
JY
230 UINT64 *PageMapLevel4Entry;\r
231 UINT64 *PageMap;\r
232 UINT64 *PageDirectoryPointerEntry;\r
233 UINT64 *PageDirectory1GEntry;\r
234 UINT64 *PageDirectoryEntry;\r
235\r
4eee0cc7
RN
236 //\r
237 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses\r
238 // when 5-Level Paging is disabled.\r
239 //\r
240 ASSERT (mPhysicalAddressBits <= 52);\r
86ad762f 241 if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {\r
4eee0cc7
RN
242 mPhysicalAddressBits = 48;\r
243 }\r
244\r
245 NumberOfPml5EntriesNeeded = 1;\r
246 if (mPhysicalAddressBits > 48) {\r
247 NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);\r
248 mPhysicalAddressBits = 48;\r
249 }\r
250\r
251 NumberOfPml4EntriesNeeded = 1;\r
252 if (mPhysicalAddressBits > 39) {\r
253 NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);\r
254 mPhysicalAddressBits = 39;\r
717fb604
JY
255 }\r
256\r
4eee0cc7
RN
257 NumberOfPdpEntriesNeeded = 1;\r
258 ASSERT (mPhysicalAddressBits > 30);\r
259 NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);\r
260\r
717fb604
JY
261 //\r
262 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
263 //\r
264 PageMap = (VOID *) PageTable;\r
265\r
266 PageMapLevel4Entry = PageMap;\r
4eee0cc7 267 PageMapLevel5Entry = NULL;\r
86ad762f 268 if (m5LevelPagingNeeded) {\r
7365eb2c 269 //\r
4eee0cc7 270 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.\r
7365eb2c 271 //\r
4eee0cc7
RN
272 PageMapLevel5Entry = PageMap;\r
273 }\r
274 PageAddress = 0;\r
7365eb2c 275\r
4eee0cc7
RN
276 for ( IndexOfPml5Entries = 0\r
277 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded\r
278 ; IndexOfPml5Entries++, PageMapLevel5Entry++) {\r
279 //\r
280 // Each PML5 entry points to a page of PML4 entires.\r
281 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.\r
282 // When 5-Level Paging is disabled, below allocation happens only once.\r
283 //\r
86ad762f 284 if (m5LevelPagingNeeded) {\r
4eee0cc7
RN
285 PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);\r
286 if (PageMapLevel4Entry == NULL) {\r
287 PageMapLevel4Entry = AllocatePageTableMemory (1);\r
288 ASSERT(PageMapLevel4Entry != NULL);\r
289 ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));\r
290\r
291 *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
292 }\r
4e78c7be 293 }\r
717fb604 294\r
4eee0cc7
RN
295 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {\r
296 //\r
297 // Each PML4 entry points to a page of Page Directory Pointer entries.\r
298 //\r
299 PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);\r
300 if (PageDirectoryPointerEntry == NULL) {\r
301 PageDirectoryPointerEntry = AllocatePageTableMemory (1);\r
302 ASSERT(PageDirectoryPointerEntry != NULL);\r
303 ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));\r
304\r
305 *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
4e78c7be 306 }\r
7365eb2c 307\r
4eee0cc7
RN
308 if (m1GPageTableSupport) {\r
309 PageDirectory1GEntry = PageDirectoryPointerEntry;\r
310 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {\r
311 if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {\r
312 //\r
313 // Skip the < 4G entries\r
314 //\r
315 continue;\r
316 }\r
4e78c7be 317 //\r
4eee0cc7 318 // Fill in the Page Directory entries\r
4e78c7be 319 //\r
4eee0cc7 320 *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
4e78c7be 321 }\r
4eee0cc7
RN
322 } else {\r
323 PageAddress = BASE_4GB;\r
324 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
325 if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {\r
326 //\r
327 // Skip the < 4G entries\r
328 //\r
329 continue;\r
330 }\r
4e78c7be 331 //\r
4eee0cc7
RN
332 // Each Directory Pointer entries points to a page of Page Directory entires.\r
333 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
4e78c7be 334 //\r
4eee0cc7
RN
335 PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);\r
336 if (PageDirectoryEntry == NULL) {\r
337 PageDirectoryEntry = AllocatePageTableMemory (1);\r
338 ASSERT(PageDirectoryEntry != NULL);\r
339 ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));\r
340\r
341 //\r
342 // Fill in a Page Directory Pointer Entries\r
343 //\r
344 *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
345 }\r
346\r
347 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {\r
348 //\r
349 // Fill in the Page Directory entries\r
350 //\r
351 *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
352 }\r
717fb604
JY
353 }\r
354 }\r
355 }\r
356 }\r
357}\r
358\r
427e3573
MK
359/**\r
360 Create PageTable for SMM use.\r
361\r
362 @return The address of PML4 (to set CR3).\r
363\r
364**/\r
365UINT32\r
366SmmInitPageTable (\r
367 VOID\r
368 )\r
369{\r
370 EFI_PHYSICAL_ADDRESS Pages;\r
371 UINT64 *PTEntry;\r
372 LIST_ENTRY *FreePage;\r
373 UINTN Index;\r
374 UINTN PageFaultHandlerHookAddress;\r
375 IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
5c88af79 376 EFI_STATUS Status;\r
4eee0cc7
RN
377 UINT64 *Pml4Entry;\r
378 UINT64 *Pml5Entry;\r
427e3573
MK
379\r
380 //\r
381 // Initialize spin lock\r
382 //\r
fe3a75bc 383 InitializeSpinLock (mPFLock);\r
427e3573 384\r
09f7c82b
RN
385 mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);\r
386 m1GPageTableSupport = Is1GPageSupport ();\r
86ad762f 387 m5LevelPagingNeeded = Is5LevelPagingNeeded ();\r
09f7c82b 388 mPhysicalAddressBits = CalculateMaximumSupportAddress ();\r
86ad762f
RN
389 PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);\r
390 DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));\r
09f7c82b
RN
391 DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));\r
392 DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));\r
393 DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));\r
427e3573
MK
394 //\r
395 // Generate PAE page table for the first 4GB memory space\r
396 //\r
717fb604 397 Pages = Gen4GPageTable (FALSE);\r
427e3573
MK
398\r
399 //\r
400 // Set IA32_PG_PMNT bit to mask this entry\r
401 //\r
402 PTEntry = (UINT64*)(UINTN)Pages;\r
403 for (Index = 0; Index < 4; Index++) {\r
404 PTEntry[Index] |= IA32_PG_PMNT;\r
405 }\r
406\r
407 //\r
408 // Fill Page-Table-Level4 (PML4) entry\r
409 //\r
4eee0cc7
RN
410 Pml4Entry = (UINT64*)AllocatePageTableMemory (1);\r
411 ASSERT (Pml4Entry != NULL);\r
412 *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
413 ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));\r
717fb604 414\r
427e3573
MK
415 //\r
416 // Set sub-entries number\r
417 //\r
4eee0cc7
RN
418 SetSubEntriesNum (Pml4Entry, 3);\r
419 PTEntry = Pml4Entry;\r
420\r
86ad762f 421 if (m5LevelPagingNeeded) {\r
4eee0cc7
RN
422 //\r
423 // Fill PML5 entry\r
424 //\r
425 Pml5Entry = (UINT64*)AllocatePageTableMemory (1);\r
aefcf2f7 426 ASSERT (Pml5Entry != NULL);\r
4eee0cc7
RN
427 *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
428 ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));\r
429 //\r
430 // Set sub-entries number\r
431 //\r
432 SetSubEntriesNum (Pml5Entry, 1);\r
433 PTEntry = Pml5Entry;\r
434 }\r
427e3573 435\r
09f7c82b
RN
436 if (mCpuSmmRestrictedMemoryAccess) {\r
437 //\r
438 // When access to non-SMRAM memory is restricted, create page table\r
439 // that covers all memory space.\r
440 //\r
717fb604
JY
441 SetStaticPageTable ((UINTN)PTEntry);\r
442 } else {\r
443 //\r
444 // Add pages to page pool\r
445 //\r
446 FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);\r
447 ASSERT (FreePage != NULL);\r
448 for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {\r
449 InsertTailList (&mPagePool, FreePage);\r
450 FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);\r
451 }\r
427e3573
MK
452 }\r
453\r
09afd9a4
JW
454 if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||\r
455 HEAP_GUARD_NONSTOP_MODE ||\r
456 NULL_DETECTION_NONSTOP_MODE) {\r
427e3573
MK
457 //\r
458 // Set own Page Fault entry instead of the default one, because SMM Profile\r
459 // feature depends on IRET instruction to do Single Step\r
460 //\r
461 PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;\r
462 IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;\r
463 IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
464 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
465 IdtEntry->Bits.Reserved_0 = 0;\r
466 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
467 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
468 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);\r
469 IdtEntry->Bits.Reserved_1 = 0;\r
470 } else {\r
471 //\r
472 // Register Smm Page Fault Handler\r
473 //\r
5c88af79
JF
474 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);\r
475 ASSERT_EFI_ERROR (Status);\r
427e3573
MK
476 }\r
477\r
478 //\r
479 // Additional SMM IDT initialization for SMM stack guard\r
480 //\r
481 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
482 InitializeIDTSmmStackGuard ();\r
483 }\r
484\r
485 //\r
4eee0cc7 486 // Return the address of PML4/PML5 (to set CR3)\r
427e3573
MK
487 //\r
488 return (UINT32)(UINTN)PTEntry;\r
489}\r
490\r
491/**\r
492 Set access record in entry.\r
493\r
494 @param[in, out] Entry Pointer to entry\r
495 @param[in] Acc Access record value\r
496\r
497**/\r
498VOID\r
499SetAccNum (\r
500 IN OUT UINT64 *Entry,\r
501 IN UINT64 Acc\r
502 )\r
503{\r
504 //\r
505 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
506 //\r
507 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
508}\r
509\r
510/**\r
511 Return access record in entry.\r
512\r
513 @param[in] Entry Pointer to entry\r
514\r
515 @return Access record value.\r
516\r
517**/\r
518UINT64\r
519GetAccNum (\r
520 IN UINT64 *Entry\r
521 )\r
522{\r
523 //\r
524 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
525 //\r
526 return BitFieldRead64 (*Entry, 9, 11);\r
527}\r
528\r
529/**\r
530 Return and update the access record in entry.\r
531\r
532 @param[in, out] Entry Pointer to entry\r
533\r
534 @return Access record value.\r
535\r
536**/\r
537UINT64\r
538GetAndUpdateAccNum (\r
539 IN OUT UINT64 *Entry\r
540 )\r
541{\r
542 UINT64 Acc;\r
543\r
544 Acc = GetAccNum (Entry);\r
545 if ((*Entry & IA32_PG_A) != 0) {\r
546 //\r
547 // If this entry has been accessed, clear access flag in Entry and update access record\r
548 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
549 //\r
550 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
551 SetAccNum (Entry, 0x7);\r
552 return (0x7 + ACC_MAX_BIT);\r
553 } else {\r
554 if (Acc != 0) {\r
555 //\r
556 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
557 //\r
558 SetAccNum (Entry, Acc - 1);\r
559 }\r
560 }\r
561 return Acc;\r
562}\r
563\r
564/**\r
565 Reclaim free pages for PageFault handler.\r
566\r
567 Search the whole entries tree to find the leaf entry that has the smallest\r
568 access record value. Insert the page pointed by this leaf entry into the\r
569 page pool. And check its upper entries if need to be inserted into the page\r
570 pool or not.\r
571\r
572**/\r
573VOID\r
574ReclaimPages (\r
575 VOID\r
576 )\r
577{\r
4eee0cc7
RN
578 UINT64 Pml5Entry;\r
579 UINT64 *Pml5;\r
427e3573
MK
580 UINT64 *Pml4;\r
581 UINT64 *Pdpt;\r
582 UINT64 *Pdt;\r
4eee0cc7 583 UINTN Pml5Index;\r
427e3573
MK
584 UINTN Pml4Index;\r
585 UINTN PdptIndex;\r
586 UINTN PdtIndex;\r
4eee0cc7 587 UINTN MinPml5;\r
427e3573
MK
588 UINTN MinPml4;\r
589 UINTN MinPdpt;\r
590 UINTN MinPdt;\r
591 UINT64 MinAcc;\r
592 UINT64 Acc;\r
593 UINT64 SubEntriesNum;\r
594 BOOLEAN PML4EIgnore;\r
595 BOOLEAN PDPTEIgnore;\r
596 UINT64 *ReleasePageAddress;\r
4eee0cc7
RN
597 IA32_CR4 Cr4;\r
598 BOOLEAN Enable5LevelPaging;\r
4201098e
DN
599 UINT64 PFAddress;\r
600 UINT64 PFAddressPml5Index;\r
601 UINT64 PFAddressPml4Index;\r
602 UINT64 PFAddressPdptIndex;\r
603 UINT64 PFAddressPdtIndex;\r
427e3573
MK
604\r
605 Pml4 = NULL;\r
606 Pdpt = NULL;\r
607 Pdt = NULL;\r
608 MinAcc = (UINT64)-1;\r
609 MinPml4 = (UINTN)-1;\r
4eee0cc7 610 MinPml5 = (UINTN)-1;\r
427e3573
MK
611 MinPdpt = (UINTN)-1;\r
612 MinPdt = (UINTN)-1;\r
613 Acc = 0;\r
614 ReleasePageAddress = 0;\r
4201098e
DN
615 PFAddress = AsmReadCr2 ();\r
616 PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);\r
617 PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);\r
618 PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);\r
619 PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);\r
427e3573 620\r
4eee0cc7
RN
621 Cr4.UintN = AsmReadCr4 ();\r
622 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);\r
623 Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);\r
624\r
625 if (!Enable5LevelPaging) {\r
626 //\r
627 // Create one fake PML5 entry for 4-Level Paging\r
628 // so that the page table parsing logic only handles 5-Level page structure.\r
629 //\r
630 Pml5Entry = (UINTN) Pml5 | IA32_PG_P;\r
631 Pml5 = &Pml5Entry;\r
632 }\r
633\r
427e3573
MK
634 //\r
635 // First, find the leaf entry has the smallest access record value\r
636 //\r
c630f69d 637 for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {\r
4eee0cc7 638 if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {\r
427e3573 639 //\r
4eee0cc7 640 // If the PML5 entry is not present or is masked, skip it\r
427e3573
MK
641 //\r
642 continue;\r
643 }\r
4eee0cc7
RN
644 Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);\r
645 for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {\r
646 if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {\r
427e3573 647 //\r
4eee0cc7 648 // If the PML4 entry is not present or is masked, skip it\r
427e3573 649 //\r
4e78c7be
RN
650 continue;\r
651 }\r
4eee0cc7
RN
652 Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);\r
653 PML4EIgnore = FALSE;\r
654 for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {\r
655 if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
656 //\r
657 // If the PDPT entry is not present or is masked, skip it\r
658 //\r
659 if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
427e3573 660 //\r
4eee0cc7 661 // If the PDPT entry is masked, we will ignore checking the PML4 entry\r
427e3573 662 //\r
4eee0cc7
RN
663 PML4EIgnore = TRUE;\r
664 }\r
665 continue;\r
666 }\r
667 if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {\r
668 //\r
669 // It's not 1-GByte pages entry, it should be a PDPT entry,\r
670 // we will not check PML4 entry more\r
671 //\r
672 PML4EIgnore = TRUE;\r
673 Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);\r
674 PDPTEIgnore = FALSE;\r
675 for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {\r
676 if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
677 //\r
678 // If the PD entry is not present or is masked, skip it\r
679 //\r
680 if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
681 //\r
682 // If the PD entry is masked, we will not PDPT entry more\r
683 //\r
684 PDPTEIgnore = TRUE;\r
685 }\r
686 continue;\r
687 }\r
688 if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {\r
427e3573 689 //\r
4eee0cc7
RN
690 // It's not 2 MByte page table entry, it should be PD entry\r
691 // we will find the entry has the smallest access record value\r
427e3573
MK
692 //\r
693 PDPTEIgnore = TRUE;\r
4201098e
DN
694 if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||\r
695 Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {\r
696 Acc = GetAndUpdateAccNum (Pdt + PdtIndex);\r
697 if (Acc < MinAcc) {\r
698 //\r
699 // If the PD entry has the smallest access record value,\r
700 // save the Page address to be released\r
701 //\r
702 MinAcc = Acc;\r
703 MinPml5 = Pml5Index;\r
704 MinPml4 = Pml4Index;\r
705 MinPdpt = PdptIndex;\r
706 MinPdt = PdtIndex;\r
707 ReleasePageAddress = Pdt + PdtIndex;\r
708 }\r
709 }\r
710 }\r
711 }\r
712 if (!PDPTEIgnore) {\r
713 //\r
714 // If this PDPT entry has no PDT entries pointer to 4 KByte pages,\r
715 // it should only has the entries point to 2 MByte Pages\r
716 //\r
717 if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||\r
718 Pml5Index != PFAddressPml5Index) {\r
719 Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);\r
4eee0cc7
RN
720 if (Acc < MinAcc) {\r
721 //\r
4201098e 722 // If the PDPT entry has the smallest access record value,\r
4eee0cc7
RN
723 // save the Page address to be released\r
724 //\r
725 MinAcc = Acc;\r
726 MinPml5 = Pml5Index;\r
727 MinPml4 = Pml4Index;\r
728 MinPdpt = PdptIndex;\r
4201098e
DN
729 MinPdt = (UINTN)-1;\r
730 ReleasePageAddress = Pdpt + PdptIndex;\r
4eee0cc7 731 }\r
427e3573 732 }\r
427e3573 733 }\r
427e3573 734 }\r
427e3573 735 }\r
4eee0cc7 736 if (!PML4EIgnore) {\r
4e78c7be 737 //\r
4eee0cc7
RN
738 // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,\r
739 // it should only has the entries point to 1 GByte Pages\r
4e78c7be 740 //\r
4201098e
DN
741 if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {\r
742 Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);\r
743 if (Acc < MinAcc) {\r
744 //\r
745 // If the PML4 entry has the smallest access record value,\r
746 // save the Page address to be released\r
747 //\r
748 MinAcc = Acc;\r
749 MinPml5 = Pml5Index;\r
750 MinPml4 = Pml4Index;\r
751 MinPdpt = (UINTN)-1;\r
752 MinPdt = (UINTN)-1;\r
753 ReleasePageAddress = Pml4 + Pml4Index;\r
754 }\r
4eee0cc7 755 }\r
4e78c7be
RN
756 }\r
757 }\r
427e3573
MK
758 }\r
759 //\r
760 // Make sure one PML4/PDPT/PD entry is selected\r
761 //\r
762 ASSERT (MinAcc != (UINT64)-1);\r
763\r
764 //\r
765 // Secondly, insert the page pointed by this entry into page pool and clear this entry\r
766 //\r
241f9149 767 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));\r
427e3573
MK
768 *ReleasePageAddress = 0;\r
769\r
770 //\r
771 // Lastly, check this entry's upper entries if need to be inserted into page pool\r
772 // or not\r
773 //\r
774 while (TRUE) {\r
775 if (MinPdt != (UINTN)-1) {\r
776 //\r
777 // If 4 KByte Page Table is released, check the PDPT entry\r
778 //\r
4eee0cc7 779 Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);\r
241f9149 780 Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);\r
427e3573 781 SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);\r
4201098e
DN
782 if (SubEntriesNum == 0 &&\r
783 (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {\r
427e3573
MK
784 //\r
785 // Release the empty Page Directory table if there was no more 4 KByte Page Table entry\r
786 // clear the Page directory entry\r
787 //\r
241f9149 788 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));\r
427e3573
MK
789 Pdpt[MinPdpt] = 0;\r
790 //\r
791 // Go on checking the PML4 table\r
792 //\r
793 MinPdt = (UINTN)-1;\r
794 continue;\r
795 }\r
796 //\r
797 // Update the sub-entries filed in PDPT entry and exit\r
798 //\r
4201098e 799 SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);\r
427e3573
MK
800 break;\r
801 }\r
802 if (MinPdpt != (UINTN)-1) {\r
803 //\r
804 // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry\r
805 //\r
806 SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);\r
4201098e 807 if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {\r
427e3573
MK
808 //\r
809 // Release the empty PML4 table if there was no more 1G KByte Page Table entry\r
810 // clear the Page directory entry\r
811 //\r
241f9149 812 InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));\r
427e3573
MK
813 Pml4[MinPml4] = 0;\r
814 MinPdpt = (UINTN)-1;\r
815 continue;\r
816 }\r
817 //\r
818 // Update the sub-entries filed in PML4 entry and exit\r
819 //\r
4201098e 820 SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);\r
427e3573
MK
821 break;\r
822 }\r
823 //\r
824 // PLM4 table has been released before, exit it\r
825 //\r
826 break;\r
827 }\r
828}\r
829\r
830/**\r
831 Allocate free Page for PageFault handler use.\r
832\r
833 @return Page address.\r
834\r
835**/\r
836UINT64\r
837AllocPage (\r
838 VOID\r
839 )\r
840{\r
841 UINT64 RetVal;\r
842\r
843 if (IsListEmpty (&mPagePool)) {\r
844 //\r
845 // If page pool is empty, reclaim the used pages and insert one into page pool\r
846 //\r
847 ReclaimPages ();\r
848 }\r
849\r
850 //\r
851 // Get one free page and remove it from page pool\r
852 //\r
853 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
854 RemoveEntryList (mPagePool.ForwardLink);\r
855 //\r
856 // Clean this page and return\r
857 //\r
858 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
859 return RetVal;\r
860}\r
861\r
862/**\r
863 Page Fault handler for SMM use.\r
864\r
865**/\r
866VOID\r
867SmiDefaultPFHandler (\r
868 VOID\r
869 )\r
870{\r
871 UINT64 *PageTable;\r
4eee0cc7 872 UINT64 *PageTableTop;\r
427e3573
MK
873 UINT64 PFAddress;\r
874 UINTN StartBit;\r
875 UINTN EndBit;\r
876 UINT64 PTIndex;\r
877 UINTN Index;\r
878 SMM_PAGE_SIZE_TYPE PageSize;\r
879 UINTN NumOfPages;\r
880 UINTN PageAttribute;\r
881 EFI_STATUS Status;\r
882 UINT64 *UpperEntry;\r
4eee0cc7
RN
883 BOOLEAN Enable5LevelPaging;\r
884 IA32_CR4 Cr4;\r
427e3573
MK
885\r
886 //\r
887 // Set default SMM page attribute\r
888 //\r
889 PageSize = SmmPageSize2M;\r
890 NumOfPages = 1;\r
891 PageAttribute = 0;\r
892\r
893 EndBit = 0;\r
4eee0cc7 894 PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);\r
427e3573
MK
895 PFAddress = AsmReadCr2 ();\r
896\r
4eee0cc7
RN
897 Cr4.UintN = AsmReadCr4 ();\r
898 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);\r
899\r
427e3573
MK
900 Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);\r
901 //\r
902 // If platform not support page table attribute, set default SMM page attribute\r
903 //\r
904 if (Status != EFI_SUCCESS) {\r
905 PageSize = SmmPageSize2M;\r
906 NumOfPages = 1;\r
907 PageAttribute = 0;\r
908 }\r
909 if (PageSize >= MaxSmmPageSizeType) {\r
910 PageSize = SmmPageSize2M;\r
911 }\r
912 if (NumOfPages > 512) {\r
913 NumOfPages = 512;\r
914 }\r
915\r
916 switch (PageSize) {\r
917 case SmmPageSize4K:\r
918 //\r
919 // BIT12 to BIT20 is Page Table index\r
920 //\r
921 EndBit = 12;\r
922 break;\r
923 case SmmPageSize2M:\r
924 //\r
925 // BIT21 to BIT29 is Page Directory index\r
926 //\r
927 EndBit = 21;\r
928 PageAttribute |= (UINTN)IA32_PG_PS;\r
929 break;\r
930 case SmmPageSize1G:\r
931 if (!m1GPageTableSupport) {\r
717fb604 932 DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));\r
427e3573
MK
933 ASSERT (FALSE);\r
934 }\r
935 //\r
936 // BIT30 to BIT38 is Page Directory Pointer Table index\r
937 //\r
938 EndBit = 30;\r
939 PageAttribute |= (UINTN)IA32_PG_PS;\r
940 break;\r
941 default:\r
942 ASSERT (FALSE);\r
943 }\r
944\r
945 //\r
946 // If execute-disable is enabled, set NX bit\r
947 //\r
948 if (mXdEnabled) {\r
949 PageAttribute |= IA32_PG_NX;\r
950 }\r
951\r
952 for (Index = 0; Index < NumOfPages; Index++) {\r
4eee0cc7 953 PageTable = PageTableTop;\r
427e3573 954 UpperEntry = NULL;\r
4eee0cc7 955 for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {\r
427e3573
MK
956 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
957 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {\r
958 //\r
959 // If the entry is not present, allocate one page from page pool for it\r
960 //\r
241f9149 961 PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
427e3573
MK
962 } else {\r
963 //\r
964 // Save the upper entry address\r
965 //\r
966 UpperEntry = PageTable + PTIndex;\r
967 }\r
968 //\r
969 // BIT9 to BIT11 of entry is used to save access record,\r
970 // initialize value is 7\r
971 //\r
972 PageTable[PTIndex] |= (UINT64)IA32_PG_A;\r
973 SetAccNum (PageTable + PTIndex, 7);\r
241f9149 974 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);\r
427e3573
MK
975 }\r
976\r
977 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
978 if ((PageTable[PTIndex] & IA32_PG_P) != 0) {\r
979 //\r
980 // Check if the entry has already existed, this issue may occur when the different\r
981 // size page entries created under the same entry\r
982 //\r
717fb604
JY
983 DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));\r
984 DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));\r
427e3573
MK
985 ASSERT (FALSE);\r
986 }\r
987 //\r
988 // Fill the new entry\r
989 //\r
241f9149 990 PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |\r
881520ea 991 PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;\r
427e3573 992 if (UpperEntry != NULL) {\r
4201098e 993 SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);\r
427e3573
MK
994 }\r
995 //\r
996 // Get the next page address if we need to create more page tables\r
997 //\r
998 PFAddress += (1ull << EndBit);\r
999 }\r
1000}\r
1001\r
1002/**\r
1003 ThePage Fault handler wrapper for SMM use.\r
1004\r
1005 @param InterruptType Defines the type of interrupt or exception that\r
1006 occurred on the processor.This parameter is processor architecture specific.\r
1007 @param SystemContext A pointer to the processor context when\r
1008 the interrupt occurred on the processor.\r
1009**/\r
1010VOID\r
1011EFIAPI\r
1012SmiPFHandler (\r
b8caae19
JF
1013 IN EFI_EXCEPTION_TYPE InterruptType,\r
1014 IN EFI_SYSTEM_CONTEXT SystemContext\r
427e3573
MK
1015 )\r
1016{\r
1017 UINTN PFAddress;\r
7fa1376c
JY
1018 UINTN GuardPageAddress;\r
1019 UINTN CpuIndex;\r
427e3573
MK
1020\r
1021 ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);\r
1022\r
fe3a75bc 1023 AcquireSpinLock (mPFLock);\r
427e3573
MK
1024\r
1025 PFAddress = AsmReadCr2 ();\r
1026\r
09f7c82b 1027 if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {\r
b8caae19 1028 DumpCpuContext (InterruptType, SystemContext);\r
717fb604
JY
1029 DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));\r
1030 CpuDeadLoop ();\r
3eb69b08 1031 goto Exit;\r
717fb604
JY
1032 }\r
1033\r
427e3573 1034 //\r
7fa1376c
JY
1035 // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,\r
1036 // or SMM page protection violation.\r
427e3573 1037 //\r
7fa1376c 1038 if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&\r
427e3573 1039 (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {\r
b8caae19 1040 DumpCpuContext (InterruptType, SystemContext);\r
7fa1376c 1041 CpuIndex = GetCpuIndex ();\r
ef91b073 1042 GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));\r
7fa1376c
JY
1043 if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&\r
1044 (PFAddress >= GuardPageAddress) &&\r
1045 (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {\r
1046 DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));\r
1047 } else {\r
7fa1376c
JY
1048 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {\r
1049 DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));\r
1050 DEBUG_CODE (\r
1051 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);\r
1052 );\r
1053 } else {\r
1054 DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));\r
1055 DEBUG_CODE (\r
1056 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
1057 );\r
1058 }\r
09afd9a4
JW
1059\r
1060 if (HEAP_GUARD_NONSTOP_MODE) {\r
1061 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);\r
1062 goto Exit;\r
1063 }\r
7fa1376c 1064 }\r
427e3573 1065 CpuDeadLoop ();\r
3eb69b08 1066 goto Exit;\r
427e3573
MK
1067 }\r
1068\r
1069 //\r
8bf0380e 1070 // If a page fault occurs in non-SMRAM range.\r
427e3573
MK
1071 //\r
1072 if ((PFAddress < mCpuHotPlugData.SmrrBase) ||\r
1073 (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
1074 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {\r
8bf0380e 1075 DumpCpuContext (InterruptType, SystemContext);\r
717fb604 1076 DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));\r
427e3573
MK
1077 DEBUG_CODE (\r
1078 DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);\r
1079 );\r
1080 CpuDeadLoop ();\r
3eb69b08 1081 goto Exit;\r
427e3573 1082 }\r
09afd9a4
JW
1083\r
1084 //\r
1085 // If NULL pointer was just accessed\r
1086 //\r
1087 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&\r
1088 (PFAddress < EFI_PAGE_SIZE)) {\r
1089 DumpCpuContext (InterruptType, SystemContext);\r
1090 DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));\r
1091 DEBUG_CODE (\r
1092 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
1093 );\r
1094\r
1095 if (NULL_DETECTION_NONSTOP_MODE) {\r
1096 GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);\r
1097 goto Exit;\r
1098 }\r
1099\r
1100 CpuDeadLoop ();\r
3eb69b08 1101 goto Exit;\r
09afd9a4
JW
1102 }\r
1103\r
09f7c82b 1104 if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {\r
8bf0380e 1105 DumpCpuContext (InterruptType, SystemContext);\r
d2fc7711
JY
1106 DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));\r
1107 DEBUG_CODE (\r
1108 DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);\r
1109 );\r
1110 CpuDeadLoop ();\r
3eb69b08 1111 goto Exit;\r
d2fc7711 1112 }\r
427e3573
MK
1113 }\r
1114\r
1115 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1116 SmmProfilePFHandler (\r
1117 SystemContext.SystemContextX64->Rip,\r
1118 SystemContext.SystemContextX64->ExceptionData\r
1119 );\r
1120 } else {\r
1121 SmiDefaultPFHandler ();\r
1122 }\r
1123\r
09afd9a4 1124Exit:\r
fe3a75bc 1125 ReleaseSpinLock (mPFLock);\r
427e3573 1126}\r
717fb604
JY
1127\r
1128/**\r
1129 This function sets memory attribute for page table.\r
1130**/\r
1131VOID\r
1132SetPageTableAttributes (\r
1133 VOID\r
1134 )\r
1135{\r
1136 UINTN Index2;\r
1137 UINTN Index3;\r
1138 UINTN Index4;\r
4eee0cc7 1139 UINTN Index5;\r
717fb604
JY
1140 UINT64 *L1PageTable;\r
1141 UINT64 *L2PageTable;\r
1142 UINT64 *L3PageTable;\r
1143 UINT64 *L4PageTable;\r
4eee0cc7 1144 UINT64 *L5PageTable;\r
404250c8 1145 UINTN PageTableBase;\r
717fb604
JY
1146 BOOLEAN IsSplitted;\r
1147 BOOLEAN PageTableSplitted;\r
3eb69b08 1148 BOOLEAN CetEnabled;\r
4eee0cc7
RN
1149 BOOLEAN Enable5LevelPaging;\r
1150\r
827330cc 1151 //\r
09f7c82b
RN
1152 // Don't mark page table memory as read-only if\r
1153 // - no restriction on access to non-SMRAM memory; or\r
1015fb3c 1154 // - SMM heap guard feature enabled; or\r
827330cc
JW
1155 // BIT2: SMM page guard enabled\r
1156 // BIT3: SMM pool guard enabled\r
1015fb3c 1157 // - SMM profile feature enabled\r
827330cc 1158 //\r
09f7c82b 1159 if (!mCpuSmmRestrictedMemoryAccess ||\r
1015fb3c
SZ
1160 ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||\r
1161 FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
827330cc 1162 //\r
09f7c82b 1163 // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.\r
827330cc 1164 //\r
09f7c82b 1165 ASSERT (!(mCpuSmmRestrictedMemoryAccess &&\r
827330cc 1166 (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));\r
1015fb3c
SZ
1167\r
1168 //\r
09f7c82b 1169 // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.\r
1015fb3c 1170 //\r
09f7c82b 1171 ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));\r
717fb604
JY
1172 return ;\r
1173 }\r
1174\r
1175 DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));\r
1176\r
1177 //\r
1178 // Disable write protection, because we need mark page table to be write protected.\r
1179 // We need *write* page table memory, to mark itself to be *read only*.\r
1180 //\r
3eb69b08
JY
1181 CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;\r
1182 if (CetEnabled) {\r
1183 //\r
1184 // CET must be disabled if WP is disabled.\r
1185 //\r
1186 DisableCet();\r
1187 }\r
717fb604
JY
1188 AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);\r
1189\r
1190 do {\r
1191 DEBUG ((DEBUG_INFO, "Start...\n"));\r
1192 PageTableSplitted = FALSE;\r
4eee0cc7 1193 L5PageTable = NULL;\r
404250c8
SW
1194\r
1195 GetPageTable (&PageTableBase, &Enable5LevelPaging);\r
1196\r
4eee0cc7 1197 if (Enable5LevelPaging) {\r
404250c8
SW
1198 L5PageTable = (UINT64 *)PageTableBase;\r
1199 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
7365eb2c 1200 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
4eee0cc7 1201 }\r
7365eb2c 1202\r
4eee0cc7
RN
1203 for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {\r
1204 if (Enable5LevelPaging) {\r
1205 L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1206 if (L4PageTable == NULL) {\r
4e78c7be
RN
1207 continue;\r
1208 }\r
4eee0cc7 1209 } else {\r
404250c8 1210 L4PageTable = (UINT64 *)PageTableBase;\r
4eee0cc7
RN
1211 }\r
1212 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
1213 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1214\r
1215 for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {\r
1216 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1217 if (L3PageTable == NULL) {\r
717fb604
JY
1218 continue;\r
1219 }\r
1220\r
4eee0cc7 1221 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
717fb604
JY
1222 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1223\r
4eee0cc7
RN
1224 for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {\r
1225 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {\r
1226 // 1G\r
717fb604
JY
1227 continue;\r
1228 }\r
4eee0cc7
RN
1229 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1230 if (L2PageTable == NULL) {\r
717fb604
JY
1231 continue;\r
1232 }\r
4eee0cc7
RN
1233\r
1234 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
717fb604 1235 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
4eee0cc7
RN
1236\r
1237 for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {\r
1238 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {\r
1239 // 2M\r
1240 continue;\r
1241 }\r
1242 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);\r
1243 if (L1PageTable == NULL) {\r
1244 continue;\r
1245 }\r
1246 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);\r
1247 PageTableSplitted = (PageTableSplitted || IsSplitted);\r
1248 }\r
717fb604
JY
1249 }\r
1250 }\r
1251 }\r
1252 } while (PageTableSplitted);\r
1253\r
1254 //\r
1255 // Enable write protection, after page table updated.\r
1256 //\r
1257 AsmWriteCr0 (AsmReadCr0() | CR0_WP);\r
3eb69b08
JY
1258 if (CetEnabled) {\r
1259 //\r
1260 // re-enable CET.\r
1261 //\r
1262 EnableCet();\r
1263 }\r
717fb604
JY
1264\r
1265 return ;\r
1266}\r
37f9fea5
VN
1267\r
1268/**\r
1269 This function reads CR2 register when on-demand paging is enabled.\r
1270\r
1271 @param[out] *Cr2 Pointer to variable to hold CR2 register value.\r
1272**/\r
1273VOID\r
1274SaveCr2 (\r
1275 OUT UINTN *Cr2\r
1276 )\r
1277{\r
09f7c82b
RN
1278 if (!mCpuSmmRestrictedMemoryAccess) {\r
1279 //\r
1280 // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
1281 //\r
37f9fea5
VN
1282 *Cr2 = AsmReadCr2 ();\r
1283 }\r
1284}\r
1285\r
1286/**\r
1287 This function restores CR2 register when on-demand paging is enabled.\r
1288\r
1289 @param[in] Cr2 Value to write into CR2 register.\r
1290**/\r
1291VOID\r
1292RestoreCr2 (\r
1293 IN UINTN Cr2\r
1294 )\r
1295{\r
09f7c82b
RN
1296 if (!mCpuSmmRestrictedMemoryAccess) {\r
1297 //\r
1298 // On-demand paging is enabled when access to non-SMRAM is not restricted.\r
1299 //\r
37f9fea5
VN
1300 AsmWriteCr2 (Cr2);\r
1301 }\r
1302}\r
79186ddc
RN
1303\r
1304/**\r
1305 Return whether access to non-SMRAM is restricted.\r
1306\r
1307 @retval TRUE Access to non-SMRAM is restricted.\r
1308 @retval FALSE Access to non-SMRAM is not restricted.\r
9c33f16f 1309**/\r
79186ddc
RN
1310BOOLEAN\r
1311IsRestrictedMemoryAccess (\r
1312 VOID\r
1313 )\r
1314{\r
1315 return mCpuSmmRestrictedMemoryAccess;\r
1316}\r