/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"

//
// Global variable to keep track of the current available memory used as
// page table.
//
PAGE_TABLE_POOL  *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart   The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN  VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS  RscHob;
  EFI_PEI_HOB_POINTERS  MemHob;
  BOOLEAN               DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear    = FALSE;

  //
  // Check if page 0 exists and is free.
  //
  while ((RscHob.Raw = GetNextHob (
                         EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                         RscHob.Raw
                         )) != NULL)
  {
    if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
        (RscHob.ResourceDescriptor->PhysicalStart == 0))
    {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (
                             EFI_HOB_TYPE_MEMORY_ALLOCATION,
                             MemHob.Raw
                             )) != NULL)
      {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE)
        {
          DoClear = FALSE;
          break;
        }

        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }

      break;
    }

    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE    NULL pointer detection feature is enabled
  @return FALSE   NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE    Execute Disable Bit is available.
  @retval FALSE   Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
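  //
  // CPUID leaf 0x80000000 returns the highest extended function leaf
  // supported; leaf 0x80000001 EDX holds the extended feature flags.
  //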
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.
  // Features controlled by the following PCDs need this feature to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

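  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) bit,
  // which makes the XD bit in page table entries take effect.
  //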
  MsrRegisters  = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  The function will check if a page table entry should be split into smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.
  @param GhcbBase     Base address of GHCB pages.
  @param GhcbSize     Size of GHCB area.

  @retval TRUE        Page table should be split.
  @retval FALSE       Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 Size,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
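  //
  // A large page must be split when it covers the NULL page (for NULL
  // pointer detection), the stack guard page, an NX-protected stack range,
  // or the GHCB range, since these need attributes at 4K granularity.
  //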
  if (IsNullDetectionEnabled () && (Address == 0)) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if ((StackBase >= Address) && (StackBase < (Address + Size))) {
      return TRUE;
    }
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  if (GhcbBase != 0) {
    if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given
  PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param PoolPages   The least page number of the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   The memory is out of resources.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // the header.
  //
  PoolPages += 1; // Add one page for header.
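  //
  // Round PoolPages up to a whole multiple of PAGE_TABLE_POOL_UNIT_PAGES.
  //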
  PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
              PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a circular list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param Pages   The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address the 2M page covers.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress4K;
  UINTN                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY   *PageTableEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;

    //
    // The GHCB range consists of two pages per CPU, the GHCB and a
    // per-CPU variable page. The GHCB page needs to be mapped as an
    // unencrypted page while the per-CPU variable page needs to be
    // mapped encrypted. These pages alternate in assignment.
    //
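    // Note: the "& SIZE_4KB" test below checks bit 12 of the offset from
    // GhcbBase, so odd 4KB pages (the per-CPU variable pages) receive the
    // encryption mask while even pages (the GHCBs themselves) do not.
    //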
    if (  (GhcbBase == 0)
       || (PhysicalAddress4K < GhcbBase)
       || (PhysicalAddress4K >= GhcbBase + GhcbSize)
       || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
    {
      PageTableEntry->Uint64 |= AddressEncMask;
    }

    PageTableEntry->Bits.ReadWrite = 1;

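    //
    // Leave the NULL page (when NULL pointer detection is enabled) and the
    // stack guard page not-present so that accesses to them fault.
    //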
    if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
        (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
    {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (  PcdGetBool (PcdSetNxForStack)
       && (PhysicalAddress4K >= StackBase)
       && (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address the 1G page covers.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.
  @param[in]      GhcbBase          GHCB page area base address.
  @param[in]      GhcbSize          GHCB page area size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY      *PageDirectoryEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
      //
      // Need to split this 2M page that covers the NULL page, stack, or GHCB range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase   Base address of page table (CR3).
  @param[in] Address         Start address of a page to be set as read-only.
  @param[in] Level4Paging    Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

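  //
  // Walk down from the top paging level to the entry that maps Address,
  // splitting any large page encountered until the pool unit can be marked
  // read-only at the granularity of the mapping entries.
  //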
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit into
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed; split the current large page.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase   Base address of page table (CR3).
  @param[in] Level4Paging    Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // Disable CR0.WP so that the page table memory can be modified while we
  // mark it as read-only.
  //
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes of the processor (2MB by default). Let's
    // apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Enable write protection, after page table attributes are updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase    Stack base address.
  @param[in] StackSize    Stack size.
  @param[in] GhcbBase     GHCB base address.
  @param[in] GhcbSize     GHCB size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN EFI_PHYSICAL_ADDRESS  GhcbBase,
  IN UINTN                 GhcbSize
  )
{
  UINT32                                       RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;
  UINT32                                       RegEdx;
  UINT8                                        PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                         PageAddress;
  UINTN                                        IndexOfPml5Entries;
  UINTN                                        IndexOfPml4Entries;
  UINTN                                        IndexOfPdpEntries;
  UINTN                                        IndexOfPageDirectoryEntries;
  UINT32                                       NumberOfPml5EntriesNeeded;
  UINT32                                       NumberOfPml4EntriesNeeded;
  UINT32                                       NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER               *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                             *PageDirectoryEntry;
  UINTN                                        TotalPagesNum;
  UINTN                                        BigPageAddress;
  VOID                                         *Hob;
  BOOLEAN                                      Page5LevelSupport;
  BOOLEAN                                      Page1GSupport;
  PAGE_TABLE_1G_ENTRY                          *PageDirectory1GEntry;
  UINT64                                       AddressEncMask;
  IA32_CR4                                     Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

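  //
  // CPUID.80000001H:EDX[26] reports whether the processor supports 1GB pages.
  //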
  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
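  // The CPU HOB's SizeOfMemorySpace is used when present; otherwise
  // CPUID.80000008H:EAX[7:0] gives the physical address width, with 36 bits
  // as the fallback when that leaf is absent.
  //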
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
      NULL,
      &EcxFlags.Uint32,
      NULL,
      NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
  // addresses when 5-Level Paging is disabled, either because it is
  // unsupported by the hardware or disabled by the PCD.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
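  // Each PML5 entry maps 2^48 bytes (256TB), each PML4 entry 2^39 bytes
  // (512GB), and each PDP entry 2^30 bytes (1GB); each count below is the
  // remaining address space divided by the reach of one entry at that level.
  //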
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
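  // Without 1GB page support the total is one PML5 page, one PML4 page per
  // PML5 entry, one PDP page per PML4 entry, and one PD page per PDP entry;
  // with 1GB pages the PD pages are not needed.
  //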
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }

  DEBUG ((
    DEBUG_INFO,
    "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded,
    NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded,
    (UINT64)TotalPagesNum
    ));

  BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the
    // IndexOfPml4Entries loop. When 5-Level Paging is disabled, the
    // allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *)BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64         = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
        ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
        ; IndexOfPml4Entries++, PageMapLevel4Entry++)
    {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in within the
      // IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *)BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64         = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
            ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
            ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
        {
          //
          // Each Page Directory Pointer entry points to a page of Page
          // Directory entries. So allocate space for them and fill them in
          // within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *)BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64         = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
              //
              // Need to split this 2M page that covers the NULL page, stack, or GHCB range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill the unused PDPTEs with null entries.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // Fill the unused PML4 entries with null entries.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

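  //
  // 5-Level Paging also requires CR4.LA57 to be set; do this before the new
  // page table is loaded into CR3 by the caller.
  //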
  if (Page5LevelSupport) {
    Cr4.UintN     = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // Fill the unused PML5 entries with null entries.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table as
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}