/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2022, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

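//
// For reference (standard x86-64 4-level paging, independent of this module):
// a 48-bit linear address decomposes as
//   bits 47:39 - PML4 index  (512 entries, each mapping 512GB)
//   bits 38:30 - PDPTE index (512 entries, each mapping 1GB)
//   bits 29:21 - PDE index   (512 entries, each mapping 2MB)
//   bits 20:12 - PTE index   (512 entries, each mapping 4KB)
//   bits 11:0  - page offset
// A 2MB mapping terminates at the PDE level; splitting it to 4K pages adds
// the PTE level. 5-Level Paging adds a PML5 level above PML4 (bits 56:48).
//
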
#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"

//
// Global variable to keep track of the currently available memory used as
// page table pool.
//
PAGE_TABLE_POOL  *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart         The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN  VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS  RscHob;
  EFI_PEI_HOB_POINTERS  MemHob;
  BOOLEAN               DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear    = FALSE;

  //
  // Check if page 0 exists and is free
  //
  while ((RscHob.Raw = GetNextHob (
                         EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                         RscHob.Raw
                         )) != NULL)
  {
    if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
        (RscHob.ResourceDescriptor->PhysicalStart == 0))
    {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (
                             EFI_HOB_TYPE_MEMORY_ALLOCATION,
                             MemHob.Raw
                             )) != NULL)
      {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE)
        {
          DoClear = FALSE;
          break;
        }

        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }

      break;
    }

    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE   NULL pointer detection feature is enabled
  @return FALSE  NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
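  //
  // BIT0 of PcdNullPointerDetectionPropertyMask enables NULL pointer
  // detection; the other bits of the mask (SMM coverage, EndOfDxe behavior)
  // are documented with the PCD declaration in MdeModulePkg.dec.
  //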
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE    Execute Disable Bit is available.
  @retval FALSE   Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE
  // is set. Features controlled by the following PCDs need it to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

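  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is its NXE (No-Execute Enable) bit.
  //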
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  if ((MsrRegisters & BIT11) == 0) {
    MsrRegisters |= BIT11;
    AsmWriteMsr64 (0xC0000080, MsrRegisters);
  }
}

/**
  The function will check if the page table entry should be split into smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.
  @param GhcbBase     Base address of GHCB pages.
  @param GhcbSize     Size of GHCB area.

  @retval TRUE        Page table should be split.
  @retval FALSE       Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 Size,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  if (IsNullDetectionEnabled () && (Address == 0)) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if ((StackBase >= Address) && (StackBase < (Address + Size))) {
      return TRUE;
    }
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  if (GhcbBase != 0) {
    if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operations on page tables, the pages reserved
  for page table use should be allocated in multiples of
  PAGE_TABLE_POOL_UNIT_PAGES and at the boundary of PAGE_TABLE_POOL_ALIGNMENT.
  So the page pool is always initialized with a number of pages greater than
  or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again
  to reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this
  won't happen in practice.

  @param PoolPages      The least page number of the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   The system is out of memory resources.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
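  //
  // Worked example (assuming PAGE_TABLE_POOL_UNIT_PAGES is 512, i.e. a 2MB
  // pool unit): a request for 5 pages becomes 6 after the header page is
  // added, and is then rounded up to 512 pages, 511 of which remain usable.
  //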
  Buffer     = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param Pages  The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }
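  //
  // Note: when the current pool cannot satisfy the request, a fresh pool
  // becomes the active one and any free pages left in the old pool are
  // simply abandoned; pool memory is never freed, so this only costs a
  // little memory in a rare corner case.
  //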

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address covered by the 2M page.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress4K;
  UINTN                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY   *PageTableEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
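  //
  // (On AMD SEV platforms this PCD carries the memory encryption (C-bit)
  // mask for page table entries; it is zero when memory encryption is not
  // in use.)
  //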

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;

    //
    // The GHCB range consists of two pages per CPU, the GHCB and a
    // per-CPU variable page. The GHCB page needs to be mapped as an
    // unencrypted page while the per-CPU variable page needs to be
    // mapped encrypted. These pages alternate in assignment.
    //
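    // (The SIZE_4KB mask in the test below isolates bit 12 of the offset from
    // GhcbBase: even 4K pages in the range are GHCBs and stay unencrypted,
    // odd pages are the per-CPU variable pages and keep the encryption mask.)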
    if (  (GhcbBase == 0)
       || (PhysicalAddress4K < GhcbBase)
       || (PhysicalAddress4K >= GhcbBase + GhcbSize)
       || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
    {
      PageTableEntry->Uint64 |= AddressEncMask;
    }

    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
        (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
    {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (  PcdGetBool (PcdSetNxForStack)
       && (PhysicalAddress4K >= StackBase)
       && (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address covered by the 1G page.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY      *PageDirectoryEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
      //
      // Need to split this 2M page that covers the NULL, stack, or GHCB range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to set the
  // whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

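  //
  // (The PAGING_Lx_ADDRESS_SHIFT values are expected to be 12/21/30/39 for
  // levels 1-4, so the index computation below extracts the 9-bit table
  // index at each level; see VirtualMemory.h for the actual definitions.)
  //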
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear the R/W bit if the current page granularity is not larger than
      // the pool unit size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT both fit
          // within one 2MB page, so we never need to update attributes for
          // pages crossing a page directory. The ASSERT below guards that
          // assumption.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed; split the current large page.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // No need to clear CR0.WP since PageTableBase hasn't been written to CR3 yet.
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes of the processor (2MB by default). Let's
    // apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Enable write protection, after page table attributes have been updated.
  //
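  // (CR0.WP is bit 16 of CR0; while it is set, supervisor-mode writes to
  // read-only pages fault, which is what makes the protection effective.)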
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.
  @param[in] GhcbBase   GHCB base address.
  @param[in] GhcbSize   GHCB size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  UINT32                                        RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX   EcxFlags;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml5Entries;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml5EntriesNeeded;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page5LevelSupport;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;
  IA32_CR4                                      Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }
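  //
  // (BIT26 of CPUID leaf 80000001H EDX is the Page1GB feature flag,
  // indicating 1-GByte page support in IA-32e mode.)
  //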

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
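  //
  // (CPUID leaf 80000008H reports the physical address width in EAX[7:0];
  // the 36-bit fallback matches the classic minimum for processors that
  // support PAE.)
  //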

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
      NULL,
      NULL,
      &EcxFlags.Uint32,
      NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
  // addresses when 5-Level Paging is disabled, either because it is
  // unsupported by hardware or because it is disabled by PCD.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }

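  //
  // Worked example (illustrative): with 40 physical address bits and no 1GB
  // page support, Pml5 = 1, Pml4 = 1 << (40 - 39) = 2 and
  // Pdp = 1 << (39 - 30) = 512, so
  // TotalPagesNum = ((512 + 1) * 2 + 1) * 1 + 1 = 1028; one page less (1027)
  // when 5-Level Paging is disabled.
  //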
  DEBUG ((
    DEBUG_INFO,
    "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded,
    NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded,
    (UINT64)TotalPagesNum
    ));

  BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries. So let's allocate
    // space for them and fill them in, in the IndexOfPml4Entries loop below.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *)BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64         = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
        ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
        ; IndexOfPml4Entries++, PageMapLevel4Entry++)
    {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in, in the
      // IndexOfPdpEntries loop below.
      //
      PageDirectoryPointerEntry = (VOID *)BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64         = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
            ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
            ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
        {
          //
          // Each Directory Pointer entry points to a page of Page Directory
          // entries. So allocate space for them and fill them in, in the
          // IndexOfPageDirectoryEntries loop below.
          //
          PageDirectoryEntry = (VOID *)BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64         = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
              //
              // Need to split this 2M page that covers the NULL, stack, or GHCB range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill with null entries for the unused PDPTEs.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
    Cr4.UintN     = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // For the PML5 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table as
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}