2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
10 The basic idea is to use 2MB page table entries wherever possible. If
11 more granularity of cacheability is required then 4K page tables are used.
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
18 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
21 This program and the accompanying materials
22 are licensed and made available under the terms and conditions of the BSD License
23 which accompanies this distribution. The full text of the license may be found at
24 http://opensource.org/licenses/bsd-license.php
26 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
27 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
32 #include "VirtualMemory.h"
35 Clear legacy memory located at the first 4K-page, if available.
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clear it if so.
40 @param HoStart The start of HobList passed to DxeCore.
48 EFI_PEI_HOB_POINTERS RscHob
;
49 EFI_PEI_HOB_POINTERS MemHob
;
52 RscHob
.Raw
= HobStart
;
53 MemHob
.Raw
= HobStart
;
57 // Check if page 0 exists and free
59 while ((RscHob
.Raw
= GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR
,
60 RscHob
.Raw
)) != NULL
) {
61 if (RscHob
.ResourceDescriptor
->ResourceType
== EFI_RESOURCE_SYSTEM_MEMORY
&&
62 RscHob
.ResourceDescriptor
->PhysicalStart
== 0) {
65 // Make sure memory at 0-4095 has not been allocated.
67 while ((MemHob
.Raw
= GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION
,
68 MemHob
.Raw
)) != NULL
) {
69 if (MemHob
.MemoryAllocation
->AllocDescriptor
.MemoryBaseAddress
74 MemHob
.Raw
= GET_NEXT_HOB (MemHob
);
78 RscHob
.Raw
= GET_NEXT_HOB (RscHob
);
82 DEBUG ((DEBUG_INFO
, "Clearing first 4K-page!\r\n"));
83 SetMem (NULL
, EFI_PAGE_SIZE
, 0);
90 IsNullDetectionEnabled (
94 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT0
) != 0);
98 Enable Execute Disable Bit.
102 EnableExecuteDisableBit (
108 MsrRegisters
= AsmReadMsr64 (0xC0000080);
109 MsrRegisters
|= BIT11
;
110 AsmWriteMsr64 (0xC0000080, MsrRegisters
);
116 @param[in] PhysicalAddress Start physical address the 2M page covered.
117 @param[in, out] PageEntry2M Pointer to 2M page entry.
118 @param[in] StackBase Stack base address.
119 @param[in] StackSize Stack size.
124 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
125 IN OUT UINT64
*PageEntry2M
,
126 IN EFI_PHYSICAL_ADDRESS StackBase
,
130 EFI_PHYSICAL_ADDRESS PhysicalAddress4K
;
131 UINTN IndexOfPageTableEntries
;
132 PAGE_TABLE_4K_ENTRY
*PageTableEntry
;
133 UINT64 AddressEncMask
;
136 // Make sure AddressEncMask is contained to smallest supported address field
138 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
140 PageTableEntry
= AllocatePages (1);
141 ASSERT (PageTableEntry
!= NULL
);
144 // Fill in 2M page entry.
146 *PageEntry2M
= (UINT64
) (UINTN
) PageTableEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
148 PhysicalAddress4K
= PhysicalAddress
;
149 for (IndexOfPageTableEntries
= 0; IndexOfPageTableEntries
< 512; IndexOfPageTableEntries
++, PageTableEntry
++, PhysicalAddress4K
+= SIZE_4KB
) {
151 // Fill in the Page Table entries
153 PageTableEntry
->Uint64
= (UINT64
) PhysicalAddress4K
| AddressEncMask
;
154 PageTableEntry
->Bits
.ReadWrite
= 1;
156 if (IsNullDetectionEnabled () && PhysicalAddress4K
== 0) {
157 PageTableEntry
->Bits
.Present
= 0;
159 PageTableEntry
->Bits
.Present
= 1;
162 if (PcdGetBool (PcdSetNxForStack
)
163 && (PhysicalAddress4K
>= StackBase
)
164 && (PhysicalAddress4K
< StackBase
+ StackSize
)) {
166 // Set Nx bit for stack.
168 PageTableEntry
->Bits
.Nx
= 1;
176 @param[in] PhysicalAddress Start physical address the 1G page covered.
177 @param[in, out] PageEntry1G Pointer to 1G page entry.
178 @param[in] StackBase Stack base address.
179 @param[in] StackSize Stack size.
184 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
185 IN OUT UINT64
*PageEntry1G
,
186 IN EFI_PHYSICAL_ADDRESS StackBase
,
190 EFI_PHYSICAL_ADDRESS PhysicalAddress2M
;
191 UINTN IndexOfPageDirectoryEntries
;
192 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
193 UINT64 AddressEncMask
;
196 // Make sure AddressEncMask is contained to smallest supported address field
198 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
200 PageDirectoryEntry
= AllocatePages (1);
201 ASSERT (PageDirectoryEntry
!= NULL
);
204 // Fill in 1G page entry.
206 *PageEntry1G
= (UINT64
) (UINTN
) PageDirectoryEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
208 PhysicalAddress2M
= PhysicalAddress
;
209 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PhysicalAddress2M
+= SIZE_2MB
) {
210 if ((IsNullDetectionEnabled () && PhysicalAddress2M
== 0)
211 || (PcdGetBool (PcdSetNxForStack
)
212 && (PhysicalAddress2M
< StackBase
+ StackSize
)
213 && ((PhysicalAddress2M
+ SIZE_2MB
) > StackBase
))) {
215 // Need to split this 2M page that covers NULL or stack range.
217 Split2MPageTo4K (PhysicalAddress2M
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
220 // Fill in the Page Directory entries
222 PageDirectoryEntry
->Uint64
= (UINT64
) PhysicalAddress2M
| AddressEncMask
;
223 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
224 PageDirectoryEntry
->Bits
.Present
= 1;
225 PageDirectoryEntry
->Bits
.MustBe1
= 1;
231 Allocates and fills in the Page Directory and Page Table Entries to
232 establish a 1:1 Virtual to Physical mapping.
234 @param[in] StackBase Stack base address.
235 @param[in] StackSize Stack size.
237 @return The address of 4 level page map.
241 CreateIdentityMappingPageTables (
242 IN EFI_PHYSICAL_ADDRESS StackBase
,
248 UINT8 PhysicalAddressBits
;
249 EFI_PHYSICAL_ADDRESS PageAddress
;
250 UINTN IndexOfPml4Entries
;
251 UINTN IndexOfPdpEntries
;
252 UINTN IndexOfPageDirectoryEntries
;
253 UINT32 NumberOfPml4EntriesNeeded
;
254 UINT32 NumberOfPdpEntriesNeeded
;
255 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMapLevel4Entry
;
256 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMap
;
257 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
258 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
260 UINTN BigPageAddress
;
262 BOOLEAN Page1GSupport
;
263 PAGE_TABLE_1G_ENTRY
*PageDirectory1GEntry
;
264 UINT64 AddressEncMask
;
267 // Make sure AddressEncMask is contained to smallest supported address field
269 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
271 Page1GSupport
= FALSE
;
272 if (PcdGetBool(PcdUse1GPageTable
)) {
273 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
274 if (RegEax
>= 0x80000001) {
275 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
276 if ((RegEdx
& BIT26
) != 0) {
277 Page1GSupport
= TRUE
;
283 // Get physical address bits supported.
285 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
287 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
289 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
290 if (RegEax
>= 0x80000008) {
291 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
292 PhysicalAddressBits
= (UINT8
) RegEax
;
294 PhysicalAddressBits
= 36;
299 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
301 ASSERT (PhysicalAddressBits
<= 52);
302 if (PhysicalAddressBits
> 48) {
303 PhysicalAddressBits
= 48;
307 // Calculate the table entries needed.
309 if (PhysicalAddressBits
<= 39 ) {
310 NumberOfPml4EntriesNeeded
= 1;
311 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 30));
313 NumberOfPml4EntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 39));
314 NumberOfPdpEntriesNeeded
= 512;
318 // Pre-allocate big pages to avoid later allocations.
320 if (!Page1GSupport
) {
321 TotalPagesNum
= (NumberOfPdpEntriesNeeded
+ 1) * NumberOfPml4EntriesNeeded
+ 1;
323 TotalPagesNum
= NumberOfPml4EntriesNeeded
+ 1;
325 BigPageAddress
= (UINTN
) AllocatePages (TotalPagesNum
);
326 ASSERT (BigPageAddress
!= 0);
329 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
331 PageMap
= (VOID
*) BigPageAddress
;
332 BigPageAddress
+= SIZE_4KB
;
334 PageMapLevel4Entry
= PageMap
;
336 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< NumberOfPml4EntriesNeeded
; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
338 // Each PML4 entry points to a page of Page Directory Pointer entires.
339 // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
341 PageDirectoryPointerEntry
= (VOID
*) BigPageAddress
;
342 BigPageAddress
+= SIZE_4KB
;
347 PageMapLevel4Entry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| AddressEncMask
;
348 PageMapLevel4Entry
->Bits
.ReadWrite
= 1;
349 PageMapLevel4Entry
->Bits
.Present
= 1;
352 PageDirectory1GEntry
= (VOID
*) PageDirectoryPointerEntry
;
354 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
355 if ((IsNullDetectionEnabled () && PageAddress
== 0)
356 || (PcdGetBool (PcdSetNxForStack
)
357 && (PageAddress
< StackBase
+ StackSize
)
358 && ((PageAddress
+ SIZE_1GB
) > StackBase
))) {
359 Split1GPageTo2M (PageAddress
, (UINT64
*) PageDirectory1GEntry
, StackBase
, StackSize
);
362 // Fill in the Page Directory entries
364 PageDirectory1GEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
365 PageDirectory1GEntry
->Bits
.ReadWrite
= 1;
366 PageDirectory1GEntry
->Bits
.Present
= 1;
367 PageDirectory1GEntry
->Bits
.MustBe1
= 1;
371 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
373 // Each Directory Pointer entries points to a page of Page Directory entires.
374 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
376 PageDirectoryEntry
= (VOID
*) BigPageAddress
;
377 BigPageAddress
+= SIZE_4KB
;
380 // Fill in a Page Directory Pointer Entries
382 PageDirectoryPointerEntry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryEntry
| AddressEncMask
;
383 PageDirectoryPointerEntry
->Bits
.ReadWrite
= 1;
384 PageDirectoryPointerEntry
->Bits
.Present
= 1;
386 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
387 if ((IsNullDetectionEnabled () && PageAddress
== 0)
388 || (PcdGetBool (PcdSetNxForStack
)
389 && (PageAddress
< StackBase
+ StackSize
)
390 && ((PageAddress
+ SIZE_2MB
) > StackBase
))) {
392 // Need to split this 2M page that covers NULL or stack range.
394 Split2MPageTo4K (PageAddress
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
397 // Fill in the Page Directory entries
399 PageDirectoryEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
400 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
401 PageDirectoryEntry
->Bits
.Present
= 1;
402 PageDirectoryEntry
->Bits
.MustBe1
= 1;
407 for (; IndexOfPdpEntries
< 512; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
409 PageDirectoryPointerEntry
,
410 sizeof(PAGE_MAP_AND_DIRECTORY_POINTER
)
417 // For the PML4 entries we are not using fill in a null entry.
419 for (; IndexOfPml4Entries
< 512; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
422 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER
)
426 if (PcdGetBool (PcdSetNxForStack
)) {
427 EnableExecuteDisableBit ();
430 return (UINTN
)PageMap
;