2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
10 The basic idea is to use 2MB page table entries wherever possible. If
11 more granularity of cacheability is required then 4K page tables are used.
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
18 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
21 This program and the accompanying materials
22 are licensed and made available under the terms and conditions of the BSD License
23 which accompanies this distribution. The full text of the license may be found at
24 http://opensource.org/licenses/bsd-license.php
26 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
27 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
32 #include "VirtualMemory.h"
35 Clear legacy memory located at the first 4K-page, if available.
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clear it if so.
40 @param HobStart The start of HobList passed to DxeCore.
48 EFI_PEI_HOB_POINTERS RscHob
;
49 EFI_PEI_HOB_POINTERS MemHob
;
52 RscHob
.Raw
= HobStart
;
53 MemHob
.Raw
= HobStart
;
57 // Check if page 0 exists and free
59 while ((RscHob
.Raw
= GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR
,
60 RscHob
.Raw
)) != NULL
) {
61 if (RscHob
.ResourceDescriptor
->ResourceType
== EFI_RESOURCE_SYSTEM_MEMORY
&&
62 RscHob
.ResourceDescriptor
->PhysicalStart
== 0) {
65 // Make sure memory at 0-4095 has not been allocated.
67 while ((MemHob
.Raw
= GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION
,
68 MemHob
.Raw
)) != NULL
) {
69 if (MemHob
.MemoryAllocation
->AllocDescriptor
.MemoryBaseAddress
74 MemHob
.Raw
= GET_NEXT_HOB (MemHob
);
78 RscHob
.Raw
= GET_NEXT_HOB (RscHob
);
82 DEBUG ((DEBUG_INFO
, "Clearing first 4K-page!\r\n"));
83 SetMem (NULL
, EFI_PAGE_SIZE
, 0);
90 Return configure status of NULL pointer detection feature.
92 @return TRUE NULL pointer detection feature is enabled
93 @return FALSE NULL pointer detection feature is disabled
97 IsNullDetectionEnabled (
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT0
) != 0);
105 Enable Execute Disable Bit.
109 EnableExecuteDisableBit (
115 MsrRegisters
= AsmReadMsr64 (0xC0000080);
116 MsrRegisters
|= BIT11
;
117 AsmWriteMsr64 (0xC0000080, MsrRegisters
);
123 @param[in] PhysicalAddress Start physical address the 2M page covered.
124 @param[in, out] PageEntry2M Pointer to 2M page entry.
125 @param[in] StackBase Stack base address.
126 @param[in] StackSize Stack size.
131 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
132 IN OUT UINT64
*PageEntry2M
,
133 IN EFI_PHYSICAL_ADDRESS StackBase
,
137 EFI_PHYSICAL_ADDRESS PhysicalAddress4K
;
138 UINTN IndexOfPageTableEntries
;
139 PAGE_TABLE_4K_ENTRY
*PageTableEntry
;
140 UINT64 AddressEncMask
;
143 // Make sure AddressEncMask is contained to smallest supported address field
145 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
147 PageTableEntry
= AllocatePages (1);
148 ASSERT (PageTableEntry
!= NULL
);
151 // Fill in 2M page entry.
153 *PageEntry2M
= (UINT64
) (UINTN
) PageTableEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
155 PhysicalAddress4K
= PhysicalAddress
;
156 for (IndexOfPageTableEntries
= 0; IndexOfPageTableEntries
< 512; IndexOfPageTableEntries
++, PageTableEntry
++, PhysicalAddress4K
+= SIZE_4KB
) {
158 // Fill in the Page Table entries
160 PageTableEntry
->Uint64
= (UINT64
) PhysicalAddress4K
| AddressEncMask
;
161 PageTableEntry
->Bits
.ReadWrite
= 1;
163 if (IsNullDetectionEnabled () && PhysicalAddress4K
== 0) {
164 PageTableEntry
->Bits
.Present
= 0;
166 PageTableEntry
->Bits
.Present
= 1;
169 if (PcdGetBool (PcdSetNxForStack
)
170 && (PhysicalAddress4K
>= StackBase
)
171 && (PhysicalAddress4K
< StackBase
+ StackSize
)) {
173 // Set Nx bit for stack.
175 PageTableEntry
->Bits
.Nx
= 1;
183 @param[in] PhysicalAddress Start physical address the 1G page covered.
184 @param[in, out] PageEntry1G Pointer to 1G page entry.
185 @param[in] StackBase Stack base address.
186 @param[in] StackSize Stack size.
191 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
192 IN OUT UINT64
*PageEntry1G
,
193 IN EFI_PHYSICAL_ADDRESS StackBase
,
197 EFI_PHYSICAL_ADDRESS PhysicalAddress2M
;
198 UINTN IndexOfPageDirectoryEntries
;
199 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
200 UINT64 AddressEncMask
;
203 // Make sure AddressEncMask is contained to smallest supported address field
205 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
207 PageDirectoryEntry
= AllocatePages (1);
208 ASSERT (PageDirectoryEntry
!= NULL
);
211 // Fill in 1G page entry.
213 *PageEntry1G
= (UINT64
) (UINTN
) PageDirectoryEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
215 PhysicalAddress2M
= PhysicalAddress
;
216 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PhysicalAddress2M
+= SIZE_2MB
) {
217 if ((IsNullDetectionEnabled () && PhysicalAddress2M
== 0)
218 || (PcdGetBool (PcdSetNxForStack
)
219 && (PhysicalAddress2M
< StackBase
+ StackSize
)
220 && ((PhysicalAddress2M
+ SIZE_2MB
) > StackBase
))) {
222 // Need to split this 2M page that covers NULL or stack range.
224 Split2MPageTo4K (PhysicalAddress2M
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
227 // Fill in the Page Directory entries
229 PageDirectoryEntry
->Uint64
= (UINT64
) PhysicalAddress2M
| AddressEncMask
;
230 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
231 PageDirectoryEntry
->Bits
.Present
= 1;
232 PageDirectoryEntry
->Bits
.MustBe1
= 1;
238 Allocates and fills in the Page Directory and Page Table Entries to
239 establish a 1:1 Virtual to Physical mapping.
241 @param[in] StackBase Stack base address.
242 @param[in] StackSize Stack size.
244 @return The address of 4 level page map.
248 CreateIdentityMappingPageTables (
249 IN EFI_PHYSICAL_ADDRESS StackBase
,
255 UINT8 PhysicalAddressBits
;
256 EFI_PHYSICAL_ADDRESS PageAddress
;
257 UINTN IndexOfPml4Entries
;
258 UINTN IndexOfPdpEntries
;
259 UINTN IndexOfPageDirectoryEntries
;
260 UINT32 NumberOfPml4EntriesNeeded
;
261 UINT32 NumberOfPdpEntriesNeeded
;
262 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMapLevel4Entry
;
263 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMap
;
264 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
265 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
267 UINTN BigPageAddress
;
269 BOOLEAN Page1GSupport
;
270 PAGE_TABLE_1G_ENTRY
*PageDirectory1GEntry
;
271 UINT64 AddressEncMask
;
274 // Make sure AddressEncMask is contained to smallest supported address field
276 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
278 Page1GSupport
= FALSE
;
279 if (PcdGetBool(PcdUse1GPageTable
)) {
280 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
281 if (RegEax
>= 0x80000001) {
282 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
283 if ((RegEdx
& BIT26
) != 0) {
284 Page1GSupport
= TRUE
;
290 // Get physical address bits supported.
292 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
294 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
296 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
297 if (RegEax
>= 0x80000008) {
298 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
299 PhysicalAddressBits
= (UINT8
) RegEax
;
301 PhysicalAddressBits
= 36;
306 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
308 ASSERT (PhysicalAddressBits
<= 52);
309 if (PhysicalAddressBits
> 48) {
310 PhysicalAddressBits
= 48;
314 // Calculate the table entries needed.
316 if (PhysicalAddressBits
<= 39 ) {
317 NumberOfPml4EntriesNeeded
= 1;
318 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 30));
320 NumberOfPml4EntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 39));
321 NumberOfPdpEntriesNeeded
= 512;
325 // Pre-allocate big pages to avoid later allocations.
327 if (!Page1GSupport
) {
328 TotalPagesNum
= (NumberOfPdpEntriesNeeded
+ 1) * NumberOfPml4EntriesNeeded
+ 1;
330 TotalPagesNum
= NumberOfPml4EntriesNeeded
+ 1;
332 BigPageAddress
= (UINTN
) AllocatePages (TotalPagesNum
);
333 ASSERT (BigPageAddress
!= 0);
336 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
338 PageMap
= (VOID
*) BigPageAddress
;
339 BigPageAddress
+= SIZE_4KB
;
341 PageMapLevel4Entry
= PageMap
;
343 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< NumberOfPml4EntriesNeeded
; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
345 // Each PML4 entry points to a page of Page Directory Pointer entires.
346 // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
348 PageDirectoryPointerEntry
= (VOID
*) BigPageAddress
;
349 BigPageAddress
+= SIZE_4KB
;
354 PageMapLevel4Entry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| AddressEncMask
;
355 PageMapLevel4Entry
->Bits
.ReadWrite
= 1;
356 PageMapLevel4Entry
->Bits
.Present
= 1;
359 PageDirectory1GEntry
= (VOID
*) PageDirectoryPointerEntry
;
361 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
362 if ((IsNullDetectionEnabled () && PageAddress
== 0)
363 || (PcdGetBool (PcdSetNxForStack
)
364 && (PageAddress
< StackBase
+ StackSize
)
365 && ((PageAddress
+ SIZE_1GB
) > StackBase
))) {
366 Split1GPageTo2M (PageAddress
, (UINT64
*) PageDirectory1GEntry
, StackBase
, StackSize
);
369 // Fill in the Page Directory entries
371 PageDirectory1GEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
372 PageDirectory1GEntry
->Bits
.ReadWrite
= 1;
373 PageDirectory1GEntry
->Bits
.Present
= 1;
374 PageDirectory1GEntry
->Bits
.MustBe1
= 1;
378 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
380 // Each Directory Pointer entries points to a page of Page Directory entires.
381 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
383 PageDirectoryEntry
= (VOID
*) BigPageAddress
;
384 BigPageAddress
+= SIZE_4KB
;
387 // Fill in a Page Directory Pointer Entries
389 PageDirectoryPointerEntry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryEntry
| AddressEncMask
;
390 PageDirectoryPointerEntry
->Bits
.ReadWrite
= 1;
391 PageDirectoryPointerEntry
->Bits
.Present
= 1;
393 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
394 if ((IsNullDetectionEnabled () && PageAddress
== 0)
395 || (PcdGetBool (PcdSetNxForStack
)
396 && (PageAddress
< StackBase
+ StackSize
)
397 && ((PageAddress
+ SIZE_2MB
) > StackBase
))) {
399 // Need to split this 2M page that covers NULL or stack range.
401 Split2MPageTo4K (PageAddress
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
404 // Fill in the Page Directory entries
406 PageDirectoryEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
407 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
408 PageDirectoryEntry
->Bits
.Present
= 1;
409 PageDirectoryEntry
->Bits
.MustBe1
= 1;
414 for (; IndexOfPdpEntries
< 512; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
416 PageDirectoryPointerEntry
,
417 sizeof(PAGE_MAP_AND_DIRECTORY_POINTER
)
424 // For the PML4 entries we are not using fill in a null entry.
426 for (; IndexOfPml4Entries
< 512; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
429 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER
)
433 if (PcdGetBool (PcdSetNxForStack
)) {
434 EnableExecuteDisableBit ();
437 return (UINTN
)PageMap
;