/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  finer-grained cacheability control is required, then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

  Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include "DxeIpl.h"
#include "VirtualMemory.h"

//
// Global variable to keep track of the currently available memory used for page tables.
//
PAGE_TABLE_POOL   *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart         The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS    RscHob;
  EFI_PEI_HOB_POINTERS    MemHob;
  BOOLEAN                 DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear = FALSE;

  //
  // Check if page 0 exists and is free.
  //
  while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                                   RscHob.Raw)) != NULL) {
    if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
        RscHob.ResourceDescriptor->PhysicalStart == 0) {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
                                       MemHob.Raw)) != NULL) {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE) {
          DoClear = FALSE;
          break;
        }
        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }
      break;
    }
    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE    NULL pointer detection feature is enabled
  @return FALSE   NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE    Execute Disable Bit is available.
  @retval FALSE   Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32    RegEax;
  UINT32    RegEdx;
  BOOLEAN   Available;

  Available = FALSE;
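  //
  // CPUID leaf 0x80000000 returns the highest supported extended function
  // leaf in EAX; leaf 0x80000001 reports extended feature flags in EDX.
  //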
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE is set.
  // Features controlled by the following PCDs need this feature to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64    MsrRegisters;

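  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is the No-Execute Enable (NXE) bit,
  // which makes the XD bit in page table entries effective.
  //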
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  The function will check if a page table entry should be split into smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.

  @retval TRUE        Page table should be split.
  @retval FALSE       Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS               Address,
  IN UINTN                              Size,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
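  //
  // A large page must be split if it covers page 0 (NULL pointer detection),
  // the stack guard page, or any part of the stack that needs the NX bit.
  //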
  if (IsNullDetectionEnabled () && Address == 0) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if (StackBase >= Address && StackBase < (Address + Size)) {
      return TRUE;
    }
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the number of potential split operations on the page table, the
  pages reserved for the page table should be allocated in multiples of
  PAGE_TABLE_POOL_UNIT_PAGES and at the boundary of PAGE_TABLE_POOL_ALIGNMENT.
  So the page pool is always initialized with a number of pages greater than
  or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param PoolPages    The minimum number of pages for the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   The memory is out of resources.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN           PoolPages
  )
{
  VOID          *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // the header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
              PAGE_TABLE_POOL_UNIT_PAGES;
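  //
  // For example, assuming PAGE_TABLE_POOL_UNIT_PAGES is 512 (a 2MB unit), a
  // request for 5 pages becomes 6 after adding the header page and is then
  // rounded up to 512 pages.
  //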
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool = Buffer;
    mPageTablePool = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages  = PoolPages - 1;
  mPageTablePool->Offset     = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page tables.

  This API can be called more than once to allocate memory for page tables.

  Allocates the requested number of 4KB pages and returns a pointer to the
  allocated buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param Pages    The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID          *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if (mPageTablePool == NULL ||
      Pages > mPageTablePool->FreePages) {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

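  //
  // Carve the requested pages out of the current pool. The pool is a simple
  // bump allocator; pages handed out are never returned to it.
  //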
  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset     += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages  -= Pages;

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address the 2M page covers.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
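  //
  // A 2MB range is covered by 512 4KB page table entries (512 * 4KB == 2MB).
  //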
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
        (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (PcdGetBool (PcdSetNxForStack)
        && (PhysicalAddress4K >= StackBase)
        && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address the 1G page covers.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
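  //
  // A 1GB range is covered by 512 2MB page directory entries (512 * 2MB == 1GB).
  //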
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {
      //
      // Need to split this 2M page that covers the NULL page or stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                             PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS              Address,
  IN  BOOLEAN                           Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always allocated from the page table pool, which is
  // always located at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to set
  // the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask  = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                    PAGING_1G_ADDRESS_MASK_64;
  PageTable       = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize    = PAGE_TABLE_POOL_UNIT_SIZE;

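  //
  // Walk the paging hierarchy from the top level down. Non-leaf entries are
  // followed into the next level; once a leaf (PS) entry is reached, it is
  // either made read-only across the pool unit or split into smaller pages.
  //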
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear the R/W bit if the current page granularity is not larger than
      // the pool unit size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
          // one 2MB page, so we never need to update attributes for pages
          // crossing a page directory boundary. The ASSERT below checks that.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize -= LevelSize[Level];

          ++Index;
        }
      }

      break;

    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN     PageTableBase,
  IN  BOOLEAN   Level4Paging
  )
{
  PAGE_TABLE_POOL         *HeadPool;
  PAGE_TABLE_POOL         *Pool;
  UINT64                  PoolSize;
  EFI_PHYSICAL_ADDRESS    Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // Disable write protection (CR0.WP) while we mark the page table memory as
  // read-only, since the page table pages being modified may themselves
  // already be read-only.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
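  //
  // mPageTablePool is a circular singly-linked list; visit every pool in it.
  //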
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which matches one of the page sizes of the processor (2MB by default).
    // Apply the protection to the units one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
      Address   += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize  -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Re-enable write protection after the page table attributes have been updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase    Stack base address.
  @param[in] StackSize    Stack size.

  @return The address of the top level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX   EcxFlags;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml5Entries;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml5EntriesNeeded;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page5LevelSupport;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;
  IA32_CR4                                      Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
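      //
      // CPUID.80000001H:EDX bit 26 indicates support for 1GB pages.
      //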
670 if ((RegEdx & BIT26) != 0) {
671 Page1GSupport = TRUE;
672 }
673 }
674 }
675
676 //
677 // Get physical address bits supported.
678 //
679 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
680 if (Hob != NULL) {
681 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
682 } else {
683 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
684 if (RegEax >= 0x80000008) {
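      //
      // CPUID.80000008H:EAX[7:0] reports the number of physical address bits
      // supported by the processor.
      //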
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL,
      &EcxFlags.Uint32, NULL, NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  // when 5-Level Paging is disabled, either because it is not supported by the
  // hardware or because it is disabled by PCD.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }
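
  //
  // The formula above counts one page for the PML5 table, one PML4 page per
  // PML5 entry, one PDPT page per PML4 entry and, when 1GB pages are not
  // used, one page directory page per PDPT entry.
  //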

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }

  DEBUG ((DEBUG_INFO, "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded, NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded, (UINT64)TotalPagesNum));

  BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }
  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *) BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64 = (UINT64) (UINTN) PageMapLevel4Entry | AddressEncMask;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
        ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
        ; IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *) BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
            ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
            ; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *) BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
              //
              // Need to split this 2M page that covers the NULL page or stack range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill with null entries for unused PDPTEs.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof(PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
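    //
    // Enable 5-level paging by setting CR4.LA57.
    //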
    Cr4.UintN     = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // For the PML5 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table as
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}