1 /** @file
2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
5
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
9
10 The basic idea is to use 2MB page table entries wherever possible. If
11 finer-grained cacheability is required, then 4K page tables are used.
12
13 References:
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
17
18 Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
20
21 SPDX-License-Identifier: BSD-2-Clause-Patent
22
23 **/
24
25 #include <Register/Intel/Cpuid.h>
26 #include "DxeIpl.h"
27 #include "VirtualMemory.h"
28
29 //
30 // Global variable to keep track of the currently available memory used for page tables.
31 //
32 PAGE_TABLE_POOL *mPageTablePool = NULL;
33
34 /**
35 Clear legacy memory located at the first 4K-page, if available.
36
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clears it if so.
39
40 @param HobStart The start of HobList passed to DxeCore.
41
42 **/
43 VOID
44 ClearFirst4KPage (
45 IN VOID *HobStart
46 )
47 {
48 EFI_PEI_HOB_POINTERS RscHob;
49 EFI_PEI_HOB_POINTERS MemHob;
50 BOOLEAN DoClear;
51
52 RscHob.Raw = HobStart;
53 MemHob.Raw = HobStart;
54 DoClear = FALSE;
55
56 //
57 // Check if page 0 exists and is free
58 //
59 while ((RscHob.Raw = GetNextHob (
60 EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
61 RscHob.Raw
62 )) != NULL)
63 {
64 if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
65 (RscHob.ResourceDescriptor->PhysicalStart == 0))
66 {
67 DoClear = TRUE;
68 //
69 // Make sure memory at 0-4095 has not been allocated.
70 //
71 while ((MemHob.Raw = GetNextHob (
72 EFI_HOB_TYPE_MEMORY_ALLOCATION,
73 MemHob.Raw
74 )) != NULL)
75 {
76 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
77 < EFI_PAGE_SIZE)
78 {
79 DoClear = FALSE;
80 break;
81 }
82
83 MemHob.Raw = GET_NEXT_HOB (MemHob);
84 }
85
86 break;
87 }
88
89 RscHob.Raw = GET_NEXT_HOB (RscHob);
90 }
91
92 if (DoClear) {
93 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
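//
// Writing through the NULL base address is intentional here: it clears
// physical memory 0 through 4095, which the checks above confirmed to be
// unallocated system memory.
//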
94 SetMem (NULL, EFI_PAGE_SIZE, 0);
95 }
96
97 return;
98 }
99
100 /**
101 Return the configuration status of the NULL pointer detection feature.
102
103 @return TRUE NULL pointer detection feature is enabled
104 @return FALSE NULL pointer detection feature is disabled
105
106 **/
107 BOOLEAN
108 IsNullDetectionEnabled (
109 VOID
110 )
111 {
112 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
113 }
114
115 /**
116 The function will check if Execute Disable Bit is available.
117
118 @retval TRUE Execute Disable Bit is available.
119 @retval FALSE Execute Disable Bit is not available.
120
121 **/
122 BOOLEAN
123 IsExecuteDisableBitAvailable (
124 VOID
125 )
126 {
127 UINT32 RegEax;
128 UINT32 RegEdx;
129 BOOLEAN Available;
130
131 Available = FALSE;
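//
// CPUID leaf 80000000H reports the highest supported extended function leaf,
// which must cover leaf 80000001H before the XD capability bit can be read.
//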
132 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
133 if (RegEax >= 0x80000001) {
134 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
135 if ((RegEdx & BIT20) != 0) {
136 //
137 // Bit 20: Execute Disable Bit available.
138 //
139 Available = TRUE;
140 }
141 }
142
143 return Available;
144 }
145
146 /**
147 Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.
148
149 @retval TRUE IA32_EFER.NXE should be enabled.
150 @retval FALSE IA32_EFER.NXE should not be enabled.
151
152 **/
153 BOOLEAN
154 IsEnableNonExecNeeded (
155 VOID
156 )
157 {
158 if (!IsExecuteDisableBitAvailable ()) {
159 return FALSE;
160 }
161
162 //
163 // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE is set.
164 // Features controlled by the following PCDs need this feature to be enabled.
165 //
166 return (PcdGetBool (PcdSetNxForStack) ||
167 PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
168 PcdGet32 (PcdImageProtectionPolicy) != 0);
169 }
170
171 /**
172 Enable Execute Disable Bit.
173
174 **/
175 VOID
176 EnableExecuteDisableBit (
177 VOID
178 )
179 {
180 UINT64 MsrRegisters;
181
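//
// MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) bit.
//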
182 MsrRegisters = AsmReadMsr64 (0xC0000080);
183 MsrRegisters |= BIT11;
184 AsmWriteMsr64 (0xC0000080, MsrRegisters);
185 }
186
187 /**
188 The function will check if the page table entry should be split to a smaller
189 granularity.
190
191 @param Address Physical memory address.
192 @param Size Size of the given physical memory.
193 @param StackBase Base address of stack.
194 @param StackSize Size of stack.
195 @param GhcbBase Base address of GHCB pages.
196 @param GhcbSize Size of GHCB area.
197
198 @retval TRUE Page table should be split.
199 @retval FALSE Page table should not be split.
200 **/
201 BOOLEAN
202 ToSplitPageTable (
203 IN EFI_PHYSICAL_ADDRESS Address,
204 IN UINTN Size,
205 IN EFI_PHYSICAL_ADDRESS StackBase,
206 IN UINTN StackSize,
207 IN EFI_PHYSICAL_ADDRESS GhcbBase,
208 IN UINTN GhcbSize
209 )
210 {
211 if (IsNullDetectionEnabled () && (Address == 0)) {
212 return TRUE;
213 }
214
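//
// The stack guard page at StackBase is mapped as not-present (see
// Split2MPageTo4K), which requires 4K granularity.
//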
215 if (PcdGetBool (PcdCpuStackGuard)) {
216 if ((StackBase >= Address) && (StackBase < (Address + Size))) {
217 return TRUE;
218 }
219 }
220
221 if (PcdGetBool (PcdSetNxForStack)) {
222 if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
223 return TRUE;
224 }
225 }
226
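//
// Any large page overlapping the GHCB area must be split so that the GHCB
// pages can be mapped unencrypted at 4K granularity (see Split2MPageTo4K).
//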
227 if (GhcbBase != 0) {
228 if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
229 return TRUE;
230 }
231 }
232
233 return FALSE;
234 }
235
236 /**
237 Initialize a buffer pool for page table use only.
238
239 To reduce potential split operations on the page table, the pages reserved for
240 page tables should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
241 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
242 initialized with a number of pages greater than or equal to the given PoolPages.
243
244 Once the pages in the pool are used up, this method should be called again to
245 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
246 happen in practice.
247
248 @param PoolPages The least page number of the pool to be created.
249
250 @retval TRUE The pool is initialized successfully.
251 @retval FALSE The memory is out of resources.
252 **/
253 BOOLEAN
254 InitializePageTablePool (
255 IN UINTN PoolPages
256 )
257 {
258 VOID *Buffer;
259
260 //
261 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
262 // the header.
263 //
264 PoolPages += 1; // Add one page for header.
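//
// ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) * PAGE_TABLE_POOL_UNIT_PAGES
// rounds PoolPages up to the next multiple of PAGE_TABLE_POOL_UNIT_PAGES.
//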
265 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
266 PAGE_TABLE_POOL_UNIT_PAGES;
267 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
268 if (Buffer == NULL) {
269 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
270 return FALSE;
271 }
272
273 //
274 // Link all pools into a list for easier tracking later.
275 //
276 if (mPageTablePool == NULL) {
277 mPageTablePool = Buffer;
278 mPageTablePool->NextPool = mPageTablePool;
279 } else {
280 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
281 mPageTablePool->NextPool = Buffer;
282 mPageTablePool = Buffer;
283 }
284
285 //
286 // Reserve one page for pool header.
287 //
288 mPageTablePool->FreePages = PoolPages - 1;
289 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
290
291 return TRUE;
292 }
293
294 /**
295 This API provides a way to allocate memory for page tables.
296
297 This API can be called more than once to allocate memory for page tables.
298
299 Allocates the number of 4KB pages and returns a pointer to the allocated
300 buffer. The buffer returned is aligned on a 4KB boundary.
301
302 If Pages is 0, then NULL is returned.
303 If there is not enough memory remaining to satisfy the request, then NULL is
304 returned.
305
306 @param Pages The number of 4 KB pages to allocate.
307
308 @return A pointer to the allocated buffer or NULL if allocation fails.
309
310 **/
311 VOID *
312 AllocatePageTableMemory (
313 IN UINTN Pages
314 )
315 {
316 VOID *Buffer;
317
318 if (Pages == 0) {
319 return NULL;
320 }
321
322 //
323 // Renew the pool if necessary.
324 //
325 if ((mPageTablePool == NULL) ||
326 (Pages > mPageTablePool->FreePages))
327 {
328 if (!InitializePageTablePool (Pages)) {
329 return NULL;
330 }
331 }
332
333 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
334
335 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
336 mPageTablePool->FreePages -= Pages;
337
338 return Buffer;
339 }
340
341 /**
342 Split 2M page to 4K.
343
344 @param[in] PhysicalAddress Start physical address the 2M page covered.
345 @param[in, out] PageEntry2M Pointer to 2M page entry.
346 @param[in] StackBase Stack base address.
347 @param[in] StackSize Stack size.
348 @param[in] GhcbBase GHCB page area base address.
349 @param[in] GhcbSize GHCB page area size.
350
351 **/
352 VOID
353 Split2MPageTo4K (
354 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
355 IN OUT UINT64 *PageEntry2M,
356 IN EFI_PHYSICAL_ADDRESS StackBase,
357 IN UINTN StackSize,
358 IN EFI_PHYSICAL_ADDRESS GhcbBase,
359 IN UINTN GhcbSize
360 )
361 {
362 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
363 UINTN IndexOfPageTableEntries;
364 PAGE_TABLE_4K_ENTRY *PageTableEntry;
365 UINT64 AddressEncMask;
366
367 //
368 // Make sure AddressEncMask is limited to the smallest supported address field
369 //
370 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
371
372 PageTableEntry = AllocatePageTableMemory (1);
373 ASSERT (PageTableEntry != NULL);
374
375 //
376 // Fill in 2M page entry.
377 //
378 *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
379
380 PhysicalAddress4K = PhysicalAddress;
381 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
382 //
383 // Fill in the Page Table entries
384 //
385 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;
386
387 //
388 // The GHCB range consists of two pages per CPU, the GHCB and a
389 // per-CPU variable page. The GHCB page needs to be mapped as an
390 // unencrypted page while the per-CPU variable page needs to be
391 // mapped encrypted. These pages alternate in assignment.
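// Only pages at an even 4KB offset from GhcbBase (offset bit 12 clear) are
// GHCB pages and are left unencrypted; every other page receives the
// encryption mask below.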
392 //
393 if ( (GhcbBase == 0)
394 || (PhysicalAddress4K < GhcbBase)
395 || (PhysicalAddress4K >= GhcbBase + GhcbSize)
396 || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
397 {
398 PageTableEntry->Uint64 |= AddressEncMask;
399 }
400
401 PageTableEntry->Bits.ReadWrite = 1;
402
403 if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
404 (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
405 {
406 PageTableEntry->Bits.Present = 0;
407 } else {
408 PageTableEntry->Bits.Present = 1;
409 }
410
411 if ( PcdGetBool (PcdSetNxForStack)
412 && (PhysicalAddress4K >= StackBase)
413 && (PhysicalAddress4K < StackBase + StackSize))
414 {
415 //
416 // Set Nx bit for stack.
417 //
418 PageTableEntry->Bits.Nx = 1;
419 }
420 }
421 }
422
423 /**
424 Split 1G page to 2M.
425
426 @param[in] PhysicalAddress Start physical address the 1G page covered.
427 @param[in, out] PageEntry1G Pointer to 1G page entry.
428 @param[in] StackBase Stack base address.
429 @param[in] StackSize Stack size.
430 @param[in] GhcbBase GHCB page area base address.
431 @param[in] GhcbSize GHCB page area size.
432
433 **/
434 VOID
435 Split1GPageTo2M (
436 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
437 IN OUT UINT64 *PageEntry1G,
438 IN EFI_PHYSICAL_ADDRESS StackBase,
439 IN UINTN StackSize,
440 IN EFI_PHYSICAL_ADDRESS GhcbBase,
441 IN UINTN GhcbSize
442 )
443 {
444 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;
445 UINTN IndexOfPageDirectoryEntries;
446 PAGE_TABLE_ENTRY *PageDirectoryEntry;
447 UINT64 AddressEncMask;
448
449 //
450 // Make sure AddressEncMask is limited to the smallest supported address field
451 //
452 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
453
454 PageDirectoryEntry = AllocatePageTableMemory (1);
455 ASSERT (PageDirectoryEntry != NULL);
456
457 //
458 // Fill in 1G page entry.
459 //
460 *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
461
462 PhysicalAddress2M = PhysicalAddress;
463 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
464 if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
465 //
466 // Need to split this 2M page that covers the NULL page or stack range.
467 //
468 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
469 } else {
470 //
471 // Fill in the Page Directory entries
472 //
473 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;
474 PageDirectoryEntry->Bits.ReadWrite = 1;
475 PageDirectoryEntry->Bits.Present = 1;
476 PageDirectoryEntry->Bits.MustBe1 = 1;
477 }
478 }
479 }
480
481 /**
482 Set one page of page table pool memory to be read-only.
483
484 @param[in] PageTableBase Base address of page table (CR3).
485 @param[in] Address Start address of a page to be set as read-only.
486 @param[in] Level4Paging Level 4 paging flag.
487
488 **/
489 VOID
490 SetPageTablePoolReadOnly (
491 IN UINTN PageTableBase,
492 IN EFI_PHYSICAL_ADDRESS Address,
493 IN BOOLEAN Level4Paging
494 )
495 {
496 UINTN Index;
497 UINTN EntryIndex;
498 UINT64 AddressEncMask;
499 EFI_PHYSICAL_ADDRESS PhysicalAddress;
500 UINT64 *PageTable;
501 UINT64 *NewPageTable;
502 UINT64 PageAttr;
503 UINT64 LevelSize[5];
504 UINT64 LevelMask[5];
505 UINTN LevelShift[5];
506 UINTN Level;
507 UINT64 PoolUnitSize;
508
509 ASSERT (PageTableBase != 0);
510
511 //
512 // Since the page table is always from page table pool, which is always
513 // located at the boundary of PcdPageTablePoolAlignment, we just need to
514 // set the whole pool unit to be read-only.
515 //
516 Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
517
518 LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
519 LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
520 LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
521 LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
522
523 LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
524 LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
525 LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
526 LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
527
528 LevelSize[1] = SIZE_4KB;
529 LevelSize[2] = SIZE_2MB;
530 LevelSize[3] = SIZE_1GB;
531 LevelSize[4] = SIZE_512GB;
532
533 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
534 PAGING_1G_ADDRESS_MASK_64;
535 PageTable = (UINT64 *)(UINTN)PageTableBase;
536 PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
537
538 for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
539 Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
540 Index &= PAGING_PAE_INDEX_MASK;
541
542 PageAttr = PageTable[Index];
543 if ((PageAttr & IA32_PG_PS) == 0) {
544 //
545 // Go to next level of table.
546 //
547 PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
548 PAGING_4K_ADDRESS_MASK_64);
549 continue;
550 }
551
552 if (PoolUnitSize >= LevelSize[Level]) {
553 //
554 // Clear the R/W bit if the current page granularity is not larger than the
555 // pool unit size.
556 //
557 if ((PageAttr & IA32_PG_RW) != 0) {
558 while (PoolUnitSize > 0) {
559 //
560 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
561 // one page (2MB), so we don't need to update attributes for pages
562 // crossing a page directory. The ASSERT below is for that purpose.
563 //
564 ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
565
566 PageTable[Index] &= ~(UINT64)IA32_PG_RW;
567 PoolUnitSize -= LevelSize[Level];
568
569 ++Index;
570 }
571 }
572
573 break;
574 } else {
575 //
576 // A smaller page granularity is needed; create the next level of page table.
577 //
578 ASSERT (Level > 1);
579
580 NewPageTable = AllocatePageTableMemory (1);
581 ASSERT (NewPageTable != NULL);
582
583 PhysicalAddress = PageAttr & LevelMask[Level];
584 for (EntryIndex = 0;
585 EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
586 ++EntryIndex)
587 {
588 NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
589 IA32_PG_P | IA32_PG_RW;
590 if (Level > 2) {
591 NewPageTable[EntryIndex] |= IA32_PG_PS;
592 }
593
594 PhysicalAddress += LevelSize[Level - 1];
595 }
596
597 PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
598 IA32_PG_P | IA32_PG_RW;
599 PageTable = NewPageTable;
600 }
601 }
602 }
603
604 /**
605 Prevent the memory pages used for the page table from being overwritten.
606
607 @param[in] PageTableBase Base address of page table (CR3).
608 @param[in] Level4Paging Level 4 paging flag.
609
610 **/
611 VOID
612 EnablePageTableProtection (
613 IN UINTN PageTableBase,
614 IN BOOLEAN Level4Paging
615 )
616 {
617 PAGE_TABLE_POOL *HeadPool;
618 PAGE_TABLE_POOL *Pool;
619 UINT64 PoolSize;
620 EFI_PHYSICAL_ADDRESS Address;
621
622 if (mPageTablePool == NULL) {
623 return;
624 }
625
626 //
627 // Disable CR0.WP so that the page table memory can be modified while we
628 // mark it as write-protected.
629 //
630 AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
631
632 //
633 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
634 // remember the original one in advance.
635 //
636 HeadPool = mPageTablePool;
637 Pool = HeadPool;
638 do {
639 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
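//
// Pool->Offset covers the header and the pages already handed out; adding
// the remaining free pages gives the total size of this pool.
//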
640 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
641
642 //
643 // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE, which
644 // is one of the page sizes supported by the processor (2MB by default). Apply
645 // the protection to the pool one unit at a time.
646 //
647 while (PoolSize > 0) {
648 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
649 Address += PAGE_TABLE_POOL_UNIT_SIZE;
650 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
651 }
652
653 Pool = Pool->NextPool;
654 } while (Pool != HeadPool);
655
656 //
657 // Re-enable write protection after the page table attributes have been updated.
658 //
659 AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
660 }
661
662 /**
663 Allocates and fills in the Page Directory and Page Table Entries to
664 establish a 1:1 Virtual to Physical mapping.
665
666 @param[in] StackBase Stack base address.
667 @param[in] StackSize Stack size.
668 @param[in] GhcbBase GHCB base address.
669 @param[in] GhcbSize GHCB size.
670
671 @return The address of 4 level page map.
672
673 **/
674 UINTN
675 CreateIdentityMappingPageTables (
676 IN EFI_PHYSICAL_ADDRESS StackBase,
677 IN UINTN StackSize,
678 IN EFI_PHYSICAL_ADDRESS GhcbBase,
679 IN UINTN GhcbSize
680 )
681 {
682 UINT32 RegEax;
683 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
684 UINT32 RegEdx;
685 UINT8 PhysicalAddressBits;
686 EFI_PHYSICAL_ADDRESS PageAddress;
687 UINTN IndexOfPml5Entries;
688 UINTN IndexOfPml4Entries;
689 UINTN IndexOfPdpEntries;
690 UINTN IndexOfPageDirectoryEntries;
691 UINT32 NumberOfPml5EntriesNeeded;
692 UINT32 NumberOfPml4EntriesNeeded;
693 UINT32 NumberOfPdpEntriesNeeded;
694 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;
695 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
696 PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
697 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
698 PAGE_TABLE_ENTRY *PageDirectoryEntry;
699 UINTN TotalPagesNum;
700 UINTN BigPageAddress;
701 VOID *Hob;
702 BOOLEAN Page5LevelSupport;
703 BOOLEAN Page1GSupport;
704 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
705 UINT64 AddressEncMask;
706 IA32_CR4 Cr4;
707
708 //
709 // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings
710 //
711 PageMapLevel5Entry = NULL;
712
713 //
714 // Make sure AddressEncMask is limited to the smallest supported address field
715 //
716 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
717
718 Page1GSupport = FALSE;
719 if (PcdGetBool (PcdUse1GPageTable)) {
720 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
721 if (RegEax >= 0x80000001) {
722 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
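//
// Bit 26: 1-GByte pages are available.
//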
723 if ((RegEdx & BIT26) != 0) {
724 Page1GSupport = TRUE;
725 }
726 }
727 }
728
729 //
730 // Get physical address bits supported.
731 //
732 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
733 if (Hob != NULL) {
734 PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
735 } else {
736 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
737 if (RegEax >= 0x80000008) {
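//
// CPUID leaf 80000008H reports the physical address width in EAX[7:0].
//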
738 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
739 PhysicalAddressBits = (UINT8)RegEax;
740 } else {
741 PhysicalAddressBits = 36;
742 }
743 }
744
745 Page5LevelSupport = FALSE;
746 if (PcdGetBool (PcdUse5LevelPageTable)) {
747 AsmCpuidEx (
748 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
749 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
750 NULL,
751 &EcxFlags.Uint32,
752 NULL,
753 NULL
754 );
755 if (EcxFlags.Bits.FiveLevelPage != 0) {
756 Page5LevelSupport = TRUE;
757 }
758 }
759
760 DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));
761
762 //
763 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
764 // when 5-Level Paging is disabled, either because it is unsupported by the
765 // hardware or because it is disabled by PCD.
766 //
767 ASSERT (PhysicalAddressBits <= 52);
768 if (!Page5LevelSupport && (PhysicalAddressBits > 48)) {
769 PhysicalAddressBits = 48;
770 }
771
772 //
773 // Calculate the table entries needed.
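// Each PML5 entry maps 2^48 bytes, each PML4 entry maps 2^39 bytes, and each
// page-directory-pointer entry maps 2^30 bytes, hence the shifts by
// (PhysicalAddressBits - 48/39/30) below.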
774 //
775 NumberOfPml5EntriesNeeded = 1;
776 if (PhysicalAddressBits > 48) {
777 NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
778 PhysicalAddressBits = 48;
779 }
780
781 NumberOfPml4EntriesNeeded = 1;
782 if (PhysicalAddressBits > 39) {
783 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
784 PhysicalAddressBits = 39;
785 }
786
787 NumberOfPdpEntriesNeeded = 1;
788 ASSERT (PhysicalAddressBits > 30);
789 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);
790
791 //
792 // Pre-allocate big pages to avoid later allocations.
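// Without 1GB pages, this is: one page directory per PDP entry, plus one PDP
// table per PML4 entry, plus one PML4 table per PML5 entry, plus one page for
// the top-level map. With 1GB pages, the page directory level is not needed.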
793 //
794 if (!Page1GSupport) {
795 TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
796 } else {
797 TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
798 }
799
800 //
801 // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
802 //
803 if (!Page5LevelSupport) {
804 TotalPagesNum--;
805 }
806
807 DEBUG ((
808 DEBUG_INFO,
809 "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
810 NumberOfPml5EntriesNeeded,
811 NumberOfPml4EntriesNeeded,
812 NumberOfPdpEntriesNeeded,
813 (UINT64)TotalPagesNum
814 ));
815
816 BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);
817 ASSERT (BigPageAddress != 0);
818
819 //
820 // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
821 //
822 PageMap = (VOID *)BigPageAddress;
823 if (Page5LevelSupport) {
824 //
825 // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
826 //
827 PageMapLevel5Entry = PageMap;
828 BigPageAddress += SIZE_4KB;
829 }
830
831 PageAddress = 0;
832
833 for ( IndexOfPml5Entries = 0
834 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
835 ; IndexOfPml5Entries++)
836 {
837 //
838 // Each PML5 entry points to a page of PML4 entries.
839 // So let's allocate space for them and fill them in, in the IndexOfPml4Entries loop.
840 // When 5-Level Paging is disabled, the allocation below happens only once.
841 //
842 PageMapLevel4Entry = (VOID *)BigPageAddress;
843 BigPageAddress += SIZE_4KB;
844
845 if (Page5LevelSupport) {
846 //
847 // Make a PML5 Entry
848 //
849 PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;
850 PageMapLevel5Entry->Bits.ReadWrite = 1;
851 PageMapLevel5Entry->Bits.Present = 1;
852 PageMapLevel5Entry++;
853 }
854
855 for ( IndexOfPml4Entries = 0
856 ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
857 ; IndexOfPml4Entries++, PageMapLevel4Entry++)
858 {
859 //
860 // Each PML4 entry points to a page of Page Directory Pointer entries.
861 // So let's allocate space for them and fill them in, in the IndexOfPdpEntries loop.
862 //
863 PageDirectoryPointerEntry = (VOID *)BigPageAddress;
864 BigPageAddress += SIZE_4KB;
865
866 //
867 // Make a PML4 Entry
868 //
869 PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
870 PageMapLevel4Entry->Bits.ReadWrite = 1;
871 PageMapLevel4Entry->Bits.Present = 1;
872
873 if (Page1GSupport) {
874 PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;
875
876 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
877 if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
878 Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
879 } else {
880 //
881 // Fill in the Page Directory entries
882 //
883 PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
884 PageDirectory1GEntry->Bits.ReadWrite = 1;
885 PageDirectory1GEntry->Bits.Present = 1;
886 PageDirectory1GEntry->Bits.MustBe1 = 1;
887 }
888 }
889 } else {
890 for ( IndexOfPdpEntries = 0
891 ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
892 ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
893 {
894 //
895 // Each Page Directory Pointer entry points to a page of Page Directory entries.
896 // So allocate space for them and fill them in, in the IndexOfPageDirectoryEntries loop.
897 //
898 PageDirectoryEntry = (VOID *)BigPageAddress;
899 BigPageAddress += SIZE_4KB;
900
901 //
902 // Fill in a Page Directory Pointer Entry
903 //
904 PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
905 PageDirectoryPointerEntry->Bits.ReadWrite = 1;
906 PageDirectoryPointerEntry->Bits.Present = 1;
907
908 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
909 if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
910 //
911 // Need to split this 2M page that covers the NULL page or stack range.
912 //
913 Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
914 } else {
915 //
916 // Fill in the Page Directory entries
917 //
918 PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
919 PageDirectoryEntry->Bits.ReadWrite = 1;
920 PageDirectoryEntry->Bits.Present = 1;
921 PageDirectoryEntry->Bits.MustBe1 = 1;
922 }
923 }
924 }
925
926 //
927 // Fill with null entry for unused PDPTE
928 //
929 ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
930 }
931 }
932
933 //
934 // For the PML4 entries we are not using, fill in a null entry.
935 //
936 ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
937 }
938
939 if (Page5LevelSupport) {
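//
// Set CR4.LA57 now so that 5-level paging takes effect when IA-32e paging
// is enabled with this page table.
//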
940 Cr4.UintN = AsmReadCr4 ();
941 Cr4.Bits.LA57 = 1;
942 AsmWriteCr4 (Cr4.UintN);
943 //
944 // For the PML5 entries we are not using, fill in a null entry.
945 //
946 ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
947 }
948
949 //
950 // Protect the page table by marking the memory used for the page table as
951 // read-only.
952 //
953 EnablePageTableProtection ((UINTN)PageMap, TRUE);
954
955 //
956 // Set IA32_EFER.NXE if necessary.
957 //
958 if (IsEnableNonExecNeeded ()) {
959 EnableExecuteDisableBit ();
960 }
961
962 return (UINTN)PageMap;
963 }