MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
1 /** @file
2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
5
6 While we make a 1:1 mapping (identity mapping) for all physical pages,
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
9
10 The basic idea is to use 2MB page table entries wherever possible. If
11 finer granularity of cacheability is required, then 4K page tables are used.
12
13 References:
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
17
18 Copyright (c) 2006 - 2022, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
20
21 SPDX-License-Identifier: BSD-2-Clause-Patent
22
23 **/
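//
// Overview of the paging structures built by this module (derived from the code
// below): an optional PML5 table (when 5-Level Paging is enabled), PML4 tables,
// page directory pointer tables (optionally using 1GB entries), page directories
// with 2MB entries, and 4KB page tables only where ToSplitPageTable() requires
// finer granularity (NULL detection, stack guard, NX stack, GHCB area).
//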
24
25 #include <Register/Intel/Cpuid.h>
26 #include "DxeIpl.h"
27 #include "VirtualMemory.h"
28
29 //
30 // Global variable to keep track of the currently available memory used for page tables.
31 //
32 PAGE_TABLE_POOL *mPageTablePool = NULL;
33
34 /**
35 Clear legacy memory located at the first 4K-page, if available.
36
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clears it if so.
39
40 @param HobStart The start of HobList passed to DxeCore.
41
42 **/
43 VOID
44 ClearFirst4KPage (
45 IN VOID *HobStart
46 )
47 {
48 EFI_PEI_HOB_POINTERS RscHob;
49 EFI_PEI_HOB_POINTERS MemHob;
50 BOOLEAN DoClear;
51
52 RscHob.Raw = HobStart;
53 MemHob.Raw = HobStart;
54 DoClear = FALSE;
55
56 //
57 // Check if page 0 exists and is free
58 //
59 while ((RscHob.Raw = GetNextHob (
60 EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
61 RscHob.Raw
62 )) != NULL)
63 {
64 if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
65 (RscHob.ResourceDescriptor->PhysicalStart == 0))
66 {
67 DoClear = TRUE;
68 //
69 // Make sure memory at 0-4095 has not been allocated.
70 //
71 while ((MemHob.Raw = GetNextHob (
72 EFI_HOB_TYPE_MEMORY_ALLOCATION,
73 MemHob.Raw
74 )) != NULL)
75 {
76 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
77 < EFI_PAGE_SIZE)
78 {
79 DoClear = FALSE;
80 break;
81 }
82
83 MemHob.Raw = GET_NEXT_HOB (MemHob);
84 }
85
86 break;
87 }
88
89 RscHob.Raw = GET_NEXT_HOB (RscHob);
90 }
91
92 if (DoClear) {
93 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
94 SetMem (NULL, EFI_PAGE_SIZE, 0);
95 }
96
97 return;
98 }
99
100 /**
101 Return the configuration status of the NULL pointer detection feature.
102
103 @return TRUE NULL pointer detection feature is enabled
104 @return FALSE NULL pointer detection feature is disabled
105
106 **/
107 BOOLEAN
108 IsNullDetectionEnabled (
109 VOID
110 )
111 {
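// BIT0 of PcdNullPointerDetectionPropertyMask turns the feature on; this module
// then maps page 0 as not-present so that NULL dereferences fault.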
112 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
113 }
114
115 /**
116 The function will check if Execute Disable Bit is available.
117
118 @retval TRUE Execute Disable Bit is available.
119 @retval FALSE Execute Disable Bit is not available.
120
121 **/
122 BOOLEAN
123 IsExecuteDisableBitAvailable (
124 VOID
125 )
126 {
127 UINT32 RegEax;
128 UINT32 RegEdx;
129 BOOLEAN Available;
130
131 Available = FALSE;
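// CPUID leaf 0x80000000 reports the highest supported extended function in EAX.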
132 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
133 if (RegEax >= 0x80000001) {
134 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
135 if ((RegEdx & BIT20) != 0) {
136 //
137 // Bit 20: Execute Disable Bit available.
138 //
139 Available = TRUE;
140 }
141 }
142
143 return Available;
144 }
145
146 /**
147 Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.
148
149 @retval TRUE IA32_EFER.NXE should be enabled.
150 @retval FALSE IA32_EFER.NXE should not be enabled.
151
152 **/
153 BOOLEAN
154 IsEnableNonExecNeeded (
155 VOID
156 )
157 {
158 if (!IsExecuteDisableBitAvailable ()) {
159 return FALSE;
160 }
161
162 //
163 // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE is set.
164 // Features controlled by the following PCDs need this bit to be enabled.
165 //
166 return (PcdGetBool (PcdSetNxForStack) ||
167 PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
168 PcdGet32 (PcdImageProtectionPolicy) != 0);
169 }
170
171 /**
172 Enable Execute Disable Bit.
173
174 **/
175 VOID
176 EnableExecuteDisableBit (
177 VOID
178 )
179 {
180 UINT64 MsrRegisters;
181
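// MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) bit.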
182 MsrRegisters = AsmReadMsr64 (0xC0000080);
183 if ((MsrRegisters & BIT11) == 0) {
184 MsrRegisters |= BIT11;
185 AsmWriteMsr64 (0xC0000080, MsrRegisters);
186 }
187 }
188
189 /**
190 The function will check if a page table entry should be split into smaller
191 granularity.
192
193 @param Address Physical memory address.
194 @param Size Size of the given physical memory.
195 @param StackBase Base address of stack.
196 @param StackSize Size of stack.
197 @param GhcbBase Base address of GHCB pages.
198 @param GhcbSize Size of GHCB area.
199
200 @retval TRUE Page table should be split.
201 @retval FALSE Page table should not be split.
202 **/
203 BOOLEAN
204 ToSplitPageTable (
205 IN EFI_PHYSICAL_ADDRESS Address,
206 IN UINTN Size,
207 IN EFI_PHYSICAL_ADDRESS StackBase,
208 IN UINTN StackSize,
209 IN EFI_PHYSICAL_ADDRESS GhcbBase,
210 IN UINTN GhcbSize
211 )
212 {
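//
// 4K granularity is needed when the given range covers page 0 (NULL detection),
// the stack guard page, an NX-protected stack, or the GHCB area.
//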
213 if (IsNullDetectionEnabled () && (Address == 0)) {
214 return TRUE;
215 }
216
217 if (PcdGetBool (PcdCpuStackGuard)) {
218 if ((StackBase >= Address) && (StackBase < (Address + Size))) {
219 return TRUE;
220 }
221 }
222
223 if (PcdGetBool (PcdSetNxForStack)) {
224 if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
225 return TRUE;
226 }
227 }
228
229 if (GhcbBase != 0) {
230 if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
231 return TRUE;
232 }
233 }
234
235 return FALSE;
236 }
237
238 /**
239 Initialize a buffer pool for page table use only.
240
241 To reduce potential split operations on the page table, the pages reserved for
242 page tables should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
243 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
244 initialized with a number of pages greater than or equal to the given PoolPages.
245
246 Once the pages in the pool are used up, this method should be called again to
247 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES pages. Usually this won't
248 happen in practice.
249
250 @param PoolPages The minimum number of pages for the pool to be created.
251
252 @retval TRUE The pool is initialized successfully.
253 @retval FALSE Out of memory resources.
254 **/
255 BOOLEAN
256 InitializePageTablePool (
257 IN UINTN PoolPages
258 )
259 {
260 VOID *Buffer;
261
262 //
263 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
264 // header.
265 //
266 PoolPages += 1; // Add one page for header.
267 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
268 PAGE_TABLE_POOL_UNIT_PAGES;
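//
// Illustrative example, assuming the default 2MB pool unit (512 4KB pages):
// a request for 5 pages becomes 6 after adding the header page and then rounds
// up to 512, so one whole pool unit is reserved.
//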
269 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
270 if (Buffer == NULL) {
271 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
272 return FALSE;
273 }
274
275 //
276 // Link all pools into a list for easier tracking later.
277 //
278 if (mPageTablePool == NULL) {
279 mPageTablePool = Buffer;
280 mPageTablePool->NextPool = mPageTablePool;
281 } else {
282 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
283 mPageTablePool->NextPool = Buffer;
284 mPageTablePool = Buffer;
285 }
286
287 //
288 // Reserve one page for pool header.
289 //
290 mPageTablePool->FreePages = PoolPages - 1;
291 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
292
293 return TRUE;
294 }
295
296 /**
297 This API provides a way to allocate memory for page tables.
298
299 This API can be called more than once to allocate memory for page tables.
300
301 Allocates the number of 4KB pages and returns a pointer to the allocated
302 buffer. The buffer returned is aligned on a 4KB boundary.
303
304 If Pages is 0, then NULL is returned.
305 If there is not enough memory remaining to satisfy the request, then NULL is
306 returned.
307
308 @param Pages The number of 4 KB pages to allocate.
309
310 @return A pointer to the allocated buffer or NULL if allocation fails.
311
312 **/
313 VOID *
314 AllocatePageTableMemory (
315 IN UINTN Pages
316 )
317 {
318 VOID *Buffer;
319
320 if (Pages == 0) {
321 return NULL;
322 }
323
324 //
325 // Renew the pool if necessary.
326 //
327 if ((mPageTablePool == NULL) ||
328 (Pages > mPageTablePool->FreePages))
329 {
330 if (!InitializePageTablePool (Pages)) {
331 return NULL;
332 }
333 }
334
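// Carve the requested pages out of the current pool: Offset points at the next
// free byte and FreePages counts the pages still available.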
335 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
336
337 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
338 mPageTablePool->FreePages -= Pages;
339
340 return Buffer;
341 }
342
343 /**
344 Split 2M page to 4K.
345
346 @param[in] PhysicalAddress Start physical address covered by the 2M page.
347 @param[in, out] PageEntry2M Pointer to 2M page entry.
348 @param[in] StackBase Stack base address.
349 @param[in] StackSize Stack size.
350 @param[in] GhcbBase GHCB page area base address.
351 @param[in] GhcbSize GHCB page area size.
352
353 **/
354 VOID
355 Split2MPageTo4K (
356 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
357 IN OUT UINT64 *PageEntry2M,
358 IN EFI_PHYSICAL_ADDRESS StackBase,
359 IN UINTN StackSize,
360 IN EFI_PHYSICAL_ADDRESS GhcbBase,
361 IN UINTN GhcbSize
362 )
363 {
364 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
365 UINTN IndexOfPageTableEntries;
366 PAGE_TABLE_4K_ENTRY *PageTableEntry;
367 UINT64 AddressEncMask;
368
369 //
370 // Make sure AddressEncMask is contained within the smallest supported address field
371 //
372 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
373
374 PageTableEntry = AllocatePageTableMemory (1);
375 ASSERT (PageTableEntry != NULL);
376
377 //
378 // Fill in 2M page entry.
379 //
380 *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
381
382 PhysicalAddress4K = PhysicalAddress;
383 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
384 //
385 // Fill in the Page Table entries
386 //
387 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;
388
389 //
390 // The GHCB range consists of two pages per CPU, the GHCB and a
391 // per-CPU variable page. The GHCB page needs to be mapped as an
392 // unencrypted page while the per-CPU variable page needs to be
393 // mapped encrypted. These pages alternate in assignment.
394 //
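// The check below keeps the encryption mask clear only for even-numbered 4KB
// pages inside the GHCB area: (PhysicalAddress4K - GhcbBase) & SIZE_4KB tests
// bit 12, i.e. whether this page has an odd index relative to GhcbBase.
//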
395 if ( (GhcbBase == 0)
396 || (PhysicalAddress4K < GhcbBase)
397 || (PhysicalAddress4K >= GhcbBase + GhcbSize)
398 || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
399 {
400 PageTableEntry->Uint64 |= AddressEncMask;
401 }
402
403 PageTableEntry->Bits.ReadWrite = 1;
404
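//
// Page 0 (when NULL pointer detection is enabled) and the stack guard page are
// mapped not-present so that accesses to them fault.
//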
405 if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
406 (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
407 {
408 PageTableEntry->Bits.Present = 0;
409 } else {
410 PageTableEntry->Bits.Present = 1;
411 }
412
413 if ( PcdGetBool (PcdSetNxForStack)
414 && (PhysicalAddress4K >= StackBase)
415 && (PhysicalAddress4K < StackBase + StackSize))
416 {
417 //
418 // Set Nx bit for stack.
419 //
420 PageTableEntry->Bits.Nx = 1;
421 }
422 }
423 }
424
425 /**
426 Split 1G page to 2M.
427
428 @param[in] PhysicalAddress Start physical address covered by the 1G page.
429 @param[in, out] PageEntry1G Pointer to 1G page entry.
430 @param[in] StackBase Stack base address.
431 @param[in] StackSize Stack size.
432 @param[in] GhcbBase GHCB page area base address.
433 @param[in] GhcbSize GHCB page area size.
434
435 **/
436 VOID
437 Split1GPageTo2M (
438 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
439 IN OUT UINT64 *PageEntry1G,
440 IN EFI_PHYSICAL_ADDRESS StackBase,
441 IN UINTN StackSize,
442 IN EFI_PHYSICAL_ADDRESS GhcbBase,
443 IN UINTN GhcbSize
444 )
445 {
446 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;
447 UINTN IndexOfPageDirectoryEntries;
448 PAGE_TABLE_ENTRY *PageDirectoryEntry;
449 UINT64 AddressEncMask;
450
451 //
452 // Make sure AddressEncMask is contained within the smallest supported address field
453 //
454 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
455
456 PageDirectoryEntry = AllocatePageTableMemory (1);
457 ASSERT (PageDirectoryEntry != NULL);
458
459 //
460 // Fill in 1G page entry.
461 //
462 *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
463
464 PhysicalAddress2M = PhysicalAddress;
465 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
466 if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
467 //
468 // Need to split this 2M page that covers NULL or stack range.
469 //
470 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
471 } else {
472 //
473 // Fill in the Page Directory entries
474 //
475 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;
476 PageDirectoryEntry->Bits.ReadWrite = 1;
477 PageDirectoryEntry->Bits.Present = 1;
478 PageDirectoryEntry->Bits.MustBe1 = 1;
479 }
480 }
481 }
482
483 /**
484 Set one page of page table pool memory to be read-only.
485
486 @param[in] PageTableBase Base address of page table (CR3).
487 @param[in] Address Start address of a page to be set as read-only.
488 @param[in] Level4Paging Level 4 paging flag.
489
490 **/
491 VOID
492 SetPageTablePoolReadOnly (
493 IN UINTN PageTableBase,
494 IN EFI_PHYSICAL_ADDRESS Address,
495 IN BOOLEAN Level4Paging
496 )
497 {
498 UINTN Index;
499 UINTN EntryIndex;
500 UINT64 AddressEncMask;
501 EFI_PHYSICAL_ADDRESS PhysicalAddress;
502 UINT64 *PageTable;
503 UINT64 *NewPageTable;
504 UINT64 PageAttr;
505 UINT64 LevelSize[5];
506 UINT64 LevelMask[5];
507 UINTN LevelShift[5];
508 UINTN Level;
509 UINT64 PoolUnitSize;
510
511 ASSERT (PageTableBase != 0);
512
513 //
514 // Since the page table pages always come from the page table pool, which is
515 // always located at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to
516 // set the whole pool unit to be read-only.
517 //
518 Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
519
520 LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
521 LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
522 LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
523 LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
524
525 LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
526 LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
527 LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
528 LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
529
530 LevelSize[1] = SIZE_4KB;
531 LevelSize[2] = SIZE_2MB;
532 LevelSize[3] = SIZE_1GB;
533 LevelSize[4] = SIZE_512GB;
534
535 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
536 PAGING_1G_ADDRESS_MASK_64;
537 PageTable = (UINT64 *)(UINTN)PageTableBase;
538 PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
539
540 for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
541 Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
542 Index &= PAGING_PAE_INDEX_MASK;
543
544 PageAttr = PageTable[Index];
545 if ((PageAttr & IA32_PG_PS) == 0) {
546 //
547 // Go to next level of table.
548 //
549 PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
550 PAGING_4K_ADDRESS_MASK_64);
551 continue;
552 }
553
554 if (PoolUnitSize >= LevelSize[Level]) {
555 //
556 // Clear R/W bit if current page granularity is not larger than pool unit
557 // size.
558 //
559 if ((PageAttr & IA32_PG_RW) != 0) {
560 while (PoolUnitSize > 0) {
561 //
562 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
563 // one page (2MB), so we don't need to update attributes for pages
564 // crossing a page directory boundary. The ASSERT below is for that purpose.
565 //
566 ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
567
568 PageTable[Index] &= ~(UINT64)IA32_PG_RW;
569 PoolUnitSize -= LevelSize[Level];
570
571 ++Index;
572 }
573 }
574
575 break;
576 } else {
577 //
578 // A smaller page granularity must be needed.
579 //
580 ASSERT (Level > 1);
581
582 NewPageTable = AllocatePageTableMemory (1);
583 ASSERT (NewPageTable != NULL);
584
585 PhysicalAddress = PageAttr & LevelMask[Level];
586 for (EntryIndex = 0;
587 EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
588 ++EntryIndex)
589 {
590 NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
591 IA32_PG_P | IA32_PG_RW;
592 if (Level > 2) {
593 NewPageTable[EntryIndex] |= IA32_PG_PS;
594 }
595
596 PhysicalAddress += LevelSize[Level - 1];
597 }
598
599 PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
600 IA32_PG_P | IA32_PG_RW;
601 PageTable = NewPageTable;
602 }
603 }
604 }
605
606 /**
607 Prevent the memory pages used for page tables from being overwritten.
608
609 @param[in] PageTableBase Base address of page table (CR3).
610 @param[in] Level4Paging Level 4 paging flag.
611
612 **/
613 VOID
614 EnablePageTableProtection (
615 IN UINTN PageTableBase,
616 IN BOOLEAN Level4Paging
617 )
618 {
619 PAGE_TABLE_POOL *HeadPool;
620 PAGE_TABLE_POOL *Pool;
621 UINT64 PoolSize;
622 EFI_PHYSICAL_ADDRESS Address;
623
624 if (mPageTablePool == NULL) {
625 return;
626 }
627
628 //
629 // No need to clear CR0.WP since PageTableBase hasn't been written to CR3 yet.
630 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
631 // remember original one in advance.
632 //
633 HeadPool = mPageTablePool;
634 Pool = HeadPool;
635 do {
636 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
637 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
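// Offset already accounts for the header page plus every page handed out so far,
// so adding the remaining FreePages gives the total size of this pool.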
638
639 //
640 // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE, which
641 // matches one of the page sizes of the processor (2MB by default). Apply the
642 // protection to the pool units one by one.
643 //
644 while (PoolSize > 0) {
645 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
646 Address += PAGE_TABLE_POOL_UNIT_SIZE;
647 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
648 }
649
650 Pool = Pool->NextPool;
651 } while (Pool != HeadPool);
652
653 //
654 // Enable write protection after the page table attributes have been updated.
655 //
656 AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
657 }
658
659 /**
660 Allocates and fills in the Page Directory and Page Table Entries to
661 establish a 1:1 Virtual to Physical mapping.
662
663 @param[in] StackBase Stack base address.
664 @param[in] StackSize Stack size.
665 @param[in] GhcbBase GHCB base address.
666 @param[in] GhcbSize GHCB size.
667
668 @return The address of 4 level page map.
669
670 **/
671 UINTN
672 CreateIdentityMappingPageTables (
673 IN EFI_PHYSICAL_ADDRESS StackBase,
674 IN UINTN StackSize,
675 IN EFI_PHYSICAL_ADDRESS GhcbBase,
676 IN UINTN GhcbSize
677 )
678 {
679 UINT32 RegEax;
680 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
681 UINT32 RegEdx;
682 UINT8 PhysicalAddressBits;
683 EFI_PHYSICAL_ADDRESS PageAddress;
684 UINTN IndexOfPml5Entries;
685 UINTN IndexOfPml4Entries;
686 UINTN IndexOfPdpEntries;
687 UINTN IndexOfPageDirectoryEntries;
688 UINT32 NumberOfPml5EntriesNeeded;
689 UINT32 NumberOfPml4EntriesNeeded;
690 UINT32 NumberOfPdpEntriesNeeded;
691 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;
692 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
693 PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
694 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
695 PAGE_TABLE_ENTRY *PageDirectoryEntry;
696 UINTN TotalPagesNum;
697 UINTN BigPageAddress;
698 VOID *Hob;
699 BOOLEAN Page5LevelSupport;
700 BOOLEAN Page1GSupport;
701 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
702 UINT64 AddressEncMask;
703 IA32_CR4 Cr4;
704
705 //
706 // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings
707 //
708 PageMapLevel5Entry = NULL;
709
710 //
711 // Make sure AddressEncMask is contained within the smallest supported address field
712 //
713 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
714
715 Page1GSupport = FALSE;
716 if (PcdGetBool (PcdUse1GPageTable)) {
717 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
718 if (RegEax >= 0x80000001) {
719 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
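// CPUID.80000001h:EDX bit 26 (Page1GB) reports 1GB page support.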
720 if ((RegEdx & BIT26) != 0) {
721 Page1GSupport = TRUE;
722 }
723 }
724 }
725
726 //
727 // Get physical address bits supported.
728 //
729 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
730 if (Hob != NULL) {
731 PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
732 } else {
733 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
734 if (RegEax >= 0x80000008) {
735 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
736 PhysicalAddressBits = (UINT8)RegEax;
737 } else {
738 PhysicalAddressBits = 36;
739 }
740 }
741
742 Page5LevelSupport = FALSE;
743 if (PcdGetBool (PcdUse5LevelPageTable)) {
744 AsmCpuidEx (
745 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
746 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
747 NULL,
748 NULL,
749 &EcxFlags.Uint32,
750 NULL
751 );
752 if (EcxFlags.Bits.FiveLevelPage != 0) {
753 Page5LevelSupport = TRUE;
754 }
755 }
756
757 DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));
758
759 //
760 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
761 // when 5-Level Paging is disabled, either because it is not supported by the
762 // hardware or because it is disabled by the PCD.
763 //
764 ASSERT (PhysicalAddressBits <= 52);
765 if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {
766 PhysicalAddressBits = 48;
767 }
768
769 //
770 // Calculate the table entries needed.
771 //
772 NumberOfPml5EntriesNeeded = 1;
773 if (PhysicalAddressBits > 48) {
774 NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
775 PhysicalAddressBits = 48;
776 }
777
778 NumberOfPml4EntriesNeeded = 1;
779 if (PhysicalAddressBits > 39) {
780 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
781 PhysicalAddressBits = 39;
782 }
783
784 NumberOfPdpEntriesNeeded = 1;
785 ASSERT (PhysicalAddressBits > 30);
786 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);
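//
// Illustrative example: with 36 physical address bits this yields 1 PML5 entry,
// 1 PML4 entry and 1 << (36 - 30) = 64 PDP entries.
//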
787
788 //
789 // Pre-allocate big pages to avoid later allocations.
790 //
791 if (!Page1GSupport) {
792 TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
793 } else {
794 TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
795 }
796
797 //
798 // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
799 //
800 if (!Page5LevelSupport) {
801 TotalPagesNum--;
802 }
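//
// Continuing the 36-bit example without 1GB pages and without 5-Level Paging:
// TotalPagesNum = ((64 + 1) * 1 + 1) * 1 + 1 - 1 = 66, i.e. 1 PML4 page,
// 1 PDPT page and 64 page directory pages.
//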
803
804 DEBUG ((
805 DEBUG_INFO,
806 "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
807 NumberOfPml5EntriesNeeded,
808 NumberOfPml4EntriesNeeded,
809 NumberOfPdpEntriesNeeded,
810 (UINT64)TotalPagesNum
811 ));
812
813 BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);
814 ASSERT (BigPageAddress != 0);
815
816 //
817 // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
818 //
819 PageMap = (VOID *)BigPageAddress;
820 if (Page5LevelSupport) {
821 //
822 // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
823 //
824 PageMapLevel5Entry = PageMap;
825 BigPageAddress += SIZE_4KB;
826 }
827
828 PageAddress = 0;
829
830 for ( IndexOfPml5Entries = 0
831 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
832 ; IndexOfPml5Entries++)
833 {
834 //
835 // Each PML5 entry points to a page of PML4 entries.
836 // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
837 // When 5-Level Paging is disabled, the allocation below happens only once.
838 //
839 PageMapLevel4Entry = (VOID *)BigPageAddress;
840 BigPageAddress += SIZE_4KB;
841
842 if (Page5LevelSupport) {
843 //
844 // Make a PML5 Entry
845 //
846 PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;
847 PageMapLevel5Entry->Bits.ReadWrite = 1;
848 PageMapLevel5Entry->Bits.Present = 1;
849 PageMapLevel5Entry++;
850 }
851
852 for ( IndexOfPml4Entries = 0
853 ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
854 ; IndexOfPml4Entries++, PageMapLevel4Entry++)
855 {
856 //
857 // Each PML4 entry points to a page of Page Directory Pointer entries.
858 // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
859 //
860 PageDirectoryPointerEntry = (VOID *)BigPageAddress;
861 BigPageAddress += SIZE_4KB;
862
863 //
864 // Make a PML4 Entry
865 //
866 PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
867 PageMapLevel4Entry->Bits.ReadWrite = 1;
868 PageMapLevel4Entry->Bits.Present = 1;
869
870 if (Page1GSupport) {
871 PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;
872
873 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
874 if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
875 Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
876 } else {
877 //
878 // Fill in the Page Directory entries
879 //
880 PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
881 PageDirectory1GEntry->Bits.ReadWrite = 1;
882 PageDirectory1GEntry->Bits.Present = 1;
883 PageDirectory1GEntry->Bits.MustBe1 = 1;
884 }
885 }
886 } else {
887 for ( IndexOfPdpEntries = 0
888 ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
889 ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
890 {
891 //
892 // Each Page Directory Pointer entry points to a page of Page Directory entries.
893 // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
894 //
895 PageDirectoryEntry = (VOID *)BigPageAddress;
896 BigPageAddress += SIZE_4KB;
897
898 //
899 // Fill in a Page Directory Pointer Entry
900 //
901 PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
902 PageDirectoryPointerEntry->Bits.ReadWrite = 1;
903 PageDirectoryPointerEntry->Bits.Present = 1;
904
905 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
906 if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
907 //
908 // Need to split this 2M page that covers NULL or stack range.
909 //
910 Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
911 } else {
912 //
913 // Fill in the Page Directory entries
914 //
915 PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
916 PageDirectoryEntry->Bits.ReadWrite = 1;
917 PageDirectoryEntry->Bits.Present = 1;
918 PageDirectoryEntry->Bits.MustBe1 = 1;
919 }
920 }
921 }
922
923 //
924 // Fill the unused PDPTEs with null entries.
925 //
926 ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
927 }
928 }
929
930 //
931 // For the PML4 entries we are not using, fill in null entries.
932 //
933 ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
934 }
935
936 if (Page5LevelSupport) {
937 Cr4.UintN = AsmReadCr4 ();
938 Cr4.Bits.LA57 = 1;
939 AsmWriteCr4 (Cr4.UintN);
940 //
941 // For the PML5 entries we are not using, fill in null entries.
942 //
943 ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
944 }
945
946 //
947 // Protect the page table by marking the memory used for page table to be
948 // read-only.
949 //
950 EnablePageTableProtection ((UINTN)PageMap, TRUE);
951
952 //
953 // Set IA32_EFER.NXE if necessary.
954 //
955 if (IsEnableNonExecNeeded ()) {
956 EnableExecuteDisableBit ();
957 }
958
959 return (UINTN)PageMap;
960 }
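//
// Usage sketch (illustrative only, not part of this file): the DxeIpl hand-off
// path is expected to build the identity-mapped tables covering the new DXE
// stack (and, when present, the GHCB range), and the returned address is loaded
// into CR3 by the mode-switch code before entering long mode, roughly:
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE, GhcbBase, GhcbSize);
//
// BaseOfStack, STACK_SIZE, GhcbBase and GhcbSize are placeholder names here.
//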