/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required, then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"

//
// Global variable to keep track of the current available memory used as page table.
//
PAGE_TABLE_POOL  *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart         The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN  VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS  RscHob;
  EFI_PEI_HOB_POINTERS  MemHob;
  BOOLEAN               DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear    = FALSE;

  //
  // Check if page 0 exists and is free.
  //
  while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                                   RscHob.Raw)) != NULL) {
    if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
        RscHob.ResourceDescriptor->PhysicalStart == 0) {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
                                       MemHob.Raw)) != NULL) {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE) {
          DoClear = FALSE;
          break;
        }
        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }
      break;
    }
    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE    NULL pointer detection feature is enabled
  @return FALSE   NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE    Execute Disable Bit is available.
  @retval FALSE   Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.
  // Features controlled by the following PCDs need this feature to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

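  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) bit,
  // which makes the XD/NX bit in page table entries effective.
  //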
  MsrRegisters  = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  The function will check if the page table entry should be split to smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.
  @param GhcbBase     Base address of GHCB pages.
  @param GhcbSize     Size of GHCB area.

  @retval TRUE        Page table should be split.
  @retval FALSE       Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS   Address,
  IN UINTN                  Size,
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize,
  IN EFI_PHYSICAL_ADDRESS   GhcbBase,
  IN UINTN                  GhcbSize
  )
{
  if (IsNullDetectionEnabled () && Address == 0) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if (StackBase >= Address && StackBase < (Address + Size)) {
      return TRUE;
    }
  }

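  //
  // The next two checks are plain interval-overlap tests between
  // [Address, Address + Size) and the stack/GHCB ranges.
  //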
  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  if (GhcbBase != 0) {
    if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param PoolPages      The least page number of the pool to be created.

  @retval TRUE          The pool is initialized successfully.
  @retval FALSE         The memory is out of resource.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
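  //
  // For example, assuming PAGE_TABLE_POOL_UNIT_PAGES is 512 (2MB worth of
  // 4KB pages), a request for 5 pages rounds up to 512 pages: one page for
  // the pool header and 511 pages available for page tables.
  //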
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param Pages    The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if (mPageTablePool == NULL ||
      Pages > mPageTablePool->FreePages) {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address the 2M page covered.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      GhcbBase              GHCB page area base address.
  @param[in]      GhcbSize              GHCB page area size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS   PhysicalAddress,
  IN OUT UINT64             *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize,
  IN EFI_PHYSICAL_ADDRESS   GhcbBase,
  IN UINTN                  GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress4K;
  UINTN                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY   *PageTableEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
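  //
  // PcdPteMemoryEncryptionAddressOrMask typically carries the AMD SEV
  // page-encryption (C-bit) mask set by platform code; it is zero when
  // memory encryption is not in use.
  //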

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;

    //
    // The GHCB range consists of two pages per CPU, the GHCB and a
    // per-CPU variable page. The GHCB page needs to be mapped as an
    // unencrypted page while the per-CPU variable page needs to be
    // mapped encrypted. These pages alternate in assignment.
    //
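    // With that layout, pages at an odd 4KB offset from GhcbBase (bit 12 of
    // the offset set) are the per-CPU variable pages, so they keep the
    // encryption mask below.
    //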
    if ((GhcbBase == 0)
        || (PhysicalAddress4K < GhcbBase)
        || (PhysicalAddress4K >= GhcbBase + GhcbSize)
        || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0)) {
      PageTableEntry->Uint64 |= AddressEncMask;
    }
    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
        (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (PcdGetBool (PcdSetNxForStack)
        && (PhysicalAddress4K >= StackBase)
        && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address the 1G page covered.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      GhcbBase              GHCB page area base address.
  @param[in]      GhcbSize              GHCB page area size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS   PhysicalAddress,
  IN OUT UINT64             *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize,
  IN EFI_PHYSICAL_ADDRESS   GhcbBase,
  IN UINTN                  GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY      *PageDirectoryEntry;
  UINT64                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
      //
      // Need to split this 2M page that covers the NULL, stack or GHCB range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;
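  //
  // Index 1..4 correspond to the 4KB PTE, 2MB PDE, 1GB PDPTE and 512GB PML4E
  // mapping granularities, respectively.
  //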

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable      = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize   = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;

    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // Disable write protection, because we need to mark the page table to be
  // write protected.
  //
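  // CR0.WP (bit 16 of CR0) makes supervisor-mode writes to read-only pages
  // fault, so it must be clear while the page table pages are being modified.
  //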
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes of the processor (2MB by default). Let's
    // apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Enable write protection, after the page table attributes have been updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase    Stack base address.
  @param[in] StackSize    Stack size.
  @param[in] GhcbBase     GHCB base address.
  @param[in] GhcbSize     GHCB size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize,
  IN EFI_PHYSICAL_ADDRESS   GhcbBase,
  IN UINTN                  GhcbSize
  )
{
  UINT32                                        RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX   EcxFlags;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml5Entries;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml5EntriesNeeded;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page5LevelSupport;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;
  IA32_CR4                                      Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
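      //
      // CPUID.80000001H:EDX[26] (Page1GB) indicates 1-GByte page support.
      //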
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
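      //
      // CPUID.80000008H:EAX[7:0] reports the physical address width.
      //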
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL,
      &EcxFlags.Uint32, NULL, NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled, either because the hardware does not support
  // it or because it is disabled by PCD; limit the address width accordingly.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }
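
  //
  // For example, with a PhysicalAddressBits of 39 (NumberOfPml5EntriesNeeded
  // and NumberOfPml4EntriesNeeded both 1, NumberOfPdpEntriesNeeded 512), no
  // 1GB pages and no 5-Level Paging, this works out to
  // ((512 + 1) * 1 + 1) * 1 + 1 - 1 = 514 pages; with 1GB pages it is only
  // (1 + 1) * 1 + 1 - 1 = 2 pages (one PML4 page plus one PDPT page).
  //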

  DEBUG ((DEBUG_INFO, "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded, NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded, (UINT64)TotalPagesNum));

  BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *) BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64         = (UINT64) (UINTN) PageMapLevel4Entry | AddressEncMask;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
        ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
        ; IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in in the IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *) BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64         = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
            ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
            ; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *) BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64         = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
              //
              // Need to split this 2M page that covers the NULL, stack or GHCB range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64         = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill with null entry for unused PDPTE
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
    Cr4.UintN     = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // For the PML5 entries we are not using fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
933\r