/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2022, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"

//
// Global variable to keep track of the current available memory used as page table.
//
PAGE_TABLE_POOL  *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart         The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS  RscHob;
  EFI_PEI_HOB_POINTERS  MemHob;
  BOOLEAN               DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear    = FALSE;

  //
  // Check if page 0 exists and is free.
  //
  while ((RscHob.Raw = GetNextHob (
                         EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                         RscHob.Raw
                         )) != NULL)
  {
    if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
        (RscHob.ResourceDescriptor->PhysicalStart == 0))
    {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (
                             EFI_HOB_TYPE_MEMORY_ALLOCATION,
                             MemHob.Raw
                             )) != NULL)
      {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE)
        {
          DoClear = FALSE;
          break;
        }

        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }

      break;
    }

    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE   NULL pointer detection feature is enabled
  @return FALSE  NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE   Execute Disable Bit is available.
  @retval FALSE  Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

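//
// For reference, the CPUID check above can also be written with the symbolic
// definitions from <Register/Intel/Cpuid.h>. This is an illustrative sketch,
// not part of the original file; it assumes that header provides
// CPUID_EXTENDED_FUNCTION (0x80000000), CPUID_EXTENDED_CPU_SIG (0x80000001),
// and the CPUID_EXTENDED_CPU_SIG_EDX bit-field layout with NX at bit 20:
//
//   CPUID_EXTENDED_CPU_SIG_EDX  Edx;
//
//   AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
//   if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
//     AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &Edx.Uint32);
//     Available = (BOOLEAN)(Edx.Bits.NX != 0);
//   }
//
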
/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE is set.
  // Features controlled by the following PCDs need this feature to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

  //
  // Read IA32_EFER (MSR 0xC0000080) and set the NXE bit (BIT11) only if it
  // has not already been set, to avoid an unnecessary MSR write.
  //
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  if ((MsrRegisters & BIT11) == 0) {
    MsrRegisters |= BIT11;
    AsmWriteMsr64 (0xC0000080, MsrRegisters);
  }
}

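//
// Equivalent sketch (illustrative, not part of the original file) using the
// symbolic MSR definitions from <Register/Intel/ArchitecturalMsr.h>, which is
// assumed to provide MSR_IA32_EFER (0xC0000080) and the MSR_IA32_EFER_REGISTER
// bit-field layout with NXE at bit 11:
//
//   MSR_IA32_EFER_REGISTER  Efer;
//
//   Efer.Uint64 = AsmReadMsr64 (MSR_IA32_EFER);
//   if (Efer.Bits.NXE == 0) {
//     Efer.Bits.NXE = 1;
//     AsmWriteMsr64 (MSR_IA32_EFER, Efer.Uint64);
//   }
//
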
/**
  The function will check if a page table entry should be split to smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.
  @param GhcbBase     Base address of GHCB pages.
  @param GhcbSize     Size of GHCB area.

  @retval TRUE      Page table should be split.
  @retval FALSE     Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 Size,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  if (IsNullDetectionEnabled () && (Address == 0)) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if ((StackBase >= Address) && (StackBase < (Address + Size))) {
      return TRUE;
    }
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  if (GhcbBase != 0) {
    if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

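//
// Illustrative note (not part of the original file): the stack and GHCB checks
// above are standard half-open interval overlap tests. For example, with a
// hypothetical 2MB region at Address = 0x200000 (Size = 0x200000) and a stack
// at StackBase = 0x3F0000 with StackSize = 0x20000, we have
// 0x200000 < 0x3F0000 + 0x20000 and 0x200000 + 0x200000 > 0x3F0000, so the
// region overlaps the stack and the 2MB page must be split into 4K pages.
//
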
/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param PoolPages  The minimum number of pages in the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   Out of memory resources.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // the header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
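  //
  // Illustrative note (not part of the original file): assuming the default
  // PAGE_TABLE_POOL_UNIT_PAGES of 512 (one 2MB pool unit), a request for 5
  // pages becomes 6 after adding the header page, then rounds up to
  // ((6 - 1) / 512 + 1) * 512 = 512 pages, leaving 511 pages usable after
  // the header page is reserved below.
  //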
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for the pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param Pages  The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  return Buffer;
}

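//
// Usage sketch (illustrative, not part of the original file): callers in this
// file allocate page-table pages one at a time, assert on failure, and then
// fill in all 512 of the page's 8-byte entries, e.g.:
//
//   UINT64  *NewTable;
//
//   NewTable = AllocatePageTableMemory (1);
//   ASSERT (NewTable != NULL);
//   // ... fill NewTable[0..511] with page table entries ...
//
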
/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address the 2M page covered.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress4K;
  UINTN                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY   *PageTableEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;

    //
    // The GHCB range consists of two pages per CPU, the GHCB and a
    // per-CPU variable page. The GHCB page needs to be mapped as an
    // unencrypted page while the per-CPU variable page needs to be
    // mapped encrypted. These pages alternate in assignment.
    //
    if (  (GhcbBase == 0)
       || (PhysicalAddress4K < GhcbBase)
       || (PhysicalAddress4K >= GhcbBase + GhcbSize)
       || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
    {
      PageTableEntry->Uint64 |= AddressEncMask;
    }

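    //
    // Illustrative note (not part of the original file): the mask test above
    // checks bit 12 of the offset from GhcbBase, i.e. the parity of the 4KB
    // page index. With a hypothetical GhcbBase of 0x1000000, the page at
    // 0x1000000 (offset 0, even) is a GHCB page and stays unencrypted, while
    // the page at 0x1001000 (offset 0x1000, odd) is a per-CPU variable page
    // and gets AddressEncMask applied.
    //
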
    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
        (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
    {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (  PcdGetBool (PcdSetNxForStack)
       && (PhysicalAddress4K >= StackBase)
       && (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address the 1G page covered.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY      *PageDirectoryEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
      //
      // Need to split this 2M page that covers the NULL or stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PAGE_TABLE_POOL_ALIGNMENT, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to the next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear the R/W bit if the current page granularity is not larger than
      // the pool unit size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // Disable write protection (CR0.WP), because we need to mark the page table
  // itself as write-protected.
  //
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes of the processor (2MB by default). Let's
    // apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Re-enable write protection after the page table attributes are updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase    Stack base address.
  @param[in] StackSize    Stack size.
  @param[in] GhcbBase     GHCB base address.
  @param[in] GhcbSize     GHCB size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  UINT32                                       RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;
  UINT32                                       RegEdx;
  UINT8                                        PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                         PageAddress;
  UINTN                                        IndexOfPml5Entries;
  UINTN                                        IndexOfPml4Entries;
  UINTN                                        IndexOfPdpEntries;
  UINTN                                        IndexOfPageDirectoryEntries;
  UINT32                                       NumberOfPml5EntriesNeeded;
  UINT32                                       NumberOfPml4EntriesNeeded;
  UINT32                                       NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                             *PageDirectoryEntry;
  UINTN                                        TotalPagesNum;
  UINTN                                        BigPageAddress;
  VOID                                         *Hob;
  BOOLEAN                                      Page5LevelSupport;
  BOOLEAN                                      Page1GSupport;
  PAGE_TABLE_1G_ENTRY                          *PageDirectory1GEntry;
  UINT64                                       AddressEncMask;
  IA32_CR4                                     Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        //
        // Bit 26: 1-GByte pages are available.
        //
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get the physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
      NULL,
      NULL,
      &EcxFlags.Uint32,
      NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
  // addresses when 5-Level Paging is disabled, either because it is not
  // supported by the hardware or because it is disabled by PCD.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }

  DEBUG ((
    DEBUG_INFO,
    "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded,
    NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded,
    (UINT64)TotalPagesNum
    ));

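  //
  // Illustrative note (not part of the original file): for a machine with
  // 39 physical address bits, 1G pages disabled, and 4-level paging, we get
  // Pml5 = 1, Pml4 = 1, Pdp = 512, so
  // TotalPagesNum = ((512 + 1) * 1 + 1) * 1 + 1 = 515; subtracting the unused
  // PML5 page leaves 514: one PML4 page, one PDPT page, and 512 page
  // directory pages.
  //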
1436aea4 818 BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);\r
f3b33289 819 ASSERT (BigPageAddress != 0);\r
820\r
821 //\r
822 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.\r
823 //\r
1436aea4 824 PageMap = (VOID *)BigPageAddress;\r
b3527ded 825 if (Page5LevelSupport) {\r
f3b33289 826 //\r
b3527ded 827 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.\r
f3b33289 828 //\r
b3527ded
RN
829 PageMapLevel5Entry = PageMap;\r
830 BigPageAddress += SIZE_4KB;\r
831 }\r
1436aea4
MK
832\r
833 PageAddress = 0;\r
f3b33289 834\r
b3527ded 835 for ( IndexOfPml5Entries = 0\r
1436aea4
MK
836 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded\r
837 ; IndexOfPml5Entries++)\r
838 {\r
f3b33289 839 //\r
b3527ded
RN
840 // Each PML5 entry points to a page of PML4 entires.\r
841 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.\r
842 // When 5-Level Paging is disabled, below allocation happens only once.\r
f3b33289 843 //\r
1436aea4 844 PageMapLevel4Entry = (VOID *)BigPageAddress;\r
b3527ded 845 BigPageAddress += SIZE_4KB;\r
f3b33289 846\r
b3527ded
RN
847 if (Page5LevelSupport) {\r
848 //\r
849 // Make a PML5 Entry\r
850 //\r
1436aea4 851 PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;\r
b3527ded
RN
852 PageMapLevel5Entry->Bits.ReadWrite = 1;\r
853 PageMapLevel5Entry->Bits.Present = 1;\r
46f8a689 854 PageMapLevel5Entry++;\r
b3527ded 855 }\r
d1102dba 856\r
    for ( IndexOfPml4Entries = 0
          ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
          ; IndexOfPml4Entries++, PageMapLevel4Entry++)
    {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in, in the IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *)BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry.
      //
      PageMapLevel4Entry->Uint64         = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
          } else {
            //
            // Fill in the Page Directory entries.
            //
            PageDirectory1GEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
              ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
              ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
        {
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in, in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *)BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry.
          //
          PageDirectoryPointerEntry->Uint64         = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
              //
              // Need to split this 2M page that covers the NULL or stack range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
            } else {
              //
              // Fill in the Page Directory entries.
              //
              PageDirectoryEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill with null entries for the unused PDPTEs.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
    Cr4.UintN     = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // For the PML5 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}