2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
10 The basic idea is to use 2MB page table entries wherever possible. If
11 more granularity of cacheability is required then 4K page tables are used.
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
18 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
21 This program and the accompanying materials
22 are licensed and made available under the terms and conditions of the BSD License
23 which accompanies this distribution. The full text of the license may be found at
24 http://opensource.org/licenses/bsd-license.php
26 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
27 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
32 #include "VirtualMemory.h"
35 Clear legacy memory located at the first 4K-page, if available.
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clear it if so.
40 @param HobStart The start of HobList passed to DxeCore.
48 EFI_PEI_HOB_POINTERS RscHob
;
49 EFI_PEI_HOB_POINTERS MemHob
;
52 RscHob
.Raw
= HobStart
;
53 MemHob
.Raw
= HobStart
;
57 // Check if page 0 exists and free
59 while ((RscHob
.Raw
= GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR
,
60 RscHob
.Raw
)) != NULL
) {
61 if (RscHob
.ResourceDescriptor
->ResourceType
== EFI_RESOURCE_SYSTEM_MEMORY
&&
62 RscHob
.ResourceDescriptor
->PhysicalStart
== 0) {
65 // Make sure memory at 0-4095 has not been allocated.
67 while ((MemHob
.Raw
= GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION
,
68 MemHob
.Raw
)) != NULL
) {
69 if (MemHob
.MemoryAllocation
->AllocDescriptor
.MemoryBaseAddress
74 MemHob
.Raw
= GET_NEXT_HOB (MemHob
);
78 RscHob
.Raw
= GET_NEXT_HOB (RscHob
);
82 DEBUG ((DEBUG_INFO
, "Clearing first 4K-page!\r\n"));
83 SetMem (NULL
, EFI_PAGE_SIZE
, 0);
90 Return configure status of NULL pointer detection feature.
92 @return TRUE NULL pointer detection feature is enabled
93 @return FALSE NULL pointer detection feature is disabled
97 IsNullDetectionEnabled (
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT0
) != 0);
105 Enable Execute Disable Bit.
109 EnableExecuteDisableBit (
115 MsrRegisters
= AsmReadMsr64 (0xC0000080);
116 MsrRegisters
|= BIT11
;
117 AsmWriteMsr64 (0xC0000080, MsrRegisters
);
121 The function will check if page table entry should be splitted to smaller
124 @retval TRUE Page table should be split.
125 @retval FALSE Page table should not be split.
129 IN EFI_PHYSICAL_ADDRESS Address
,
131 IN EFI_PHYSICAL_ADDRESS StackBase
,
135 if (IsNullDetectionEnabled () && Address
== 0) {
139 if (PcdGetBool (PcdCpuStackGuard
)) {
140 if (StackBase
>= Address
&& StackBase
< (Address
+ Size
)) {
145 if (PcdGetBool (PcdSetNxForStack
)) {
146 if ((Address
< StackBase
+ StackSize
) && ((Address
+ Size
) > StackBase
)) {
156 @param[in] PhysicalAddress Start physical address the 2M page covered.
157 @param[in, out] PageEntry2M Pointer to 2M page entry.
158 @param[in] StackBase Stack base address.
159 @param[in] StackSize Stack size.
164 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
165 IN OUT UINT64
*PageEntry2M
,
166 IN EFI_PHYSICAL_ADDRESS StackBase
,
170 EFI_PHYSICAL_ADDRESS PhysicalAddress4K
;
171 UINTN IndexOfPageTableEntries
;
172 PAGE_TABLE_4K_ENTRY
*PageTableEntry
;
173 UINT64 AddressEncMask
;
176 // Make sure AddressEncMask is contained to smallest supported address field
178 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
180 PageTableEntry
= AllocatePages (1);
181 ASSERT (PageTableEntry
!= NULL
);
184 // Fill in 2M page entry.
186 *PageEntry2M
= (UINT64
) (UINTN
) PageTableEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
188 PhysicalAddress4K
= PhysicalAddress
;
189 for (IndexOfPageTableEntries
= 0; IndexOfPageTableEntries
< 512; IndexOfPageTableEntries
++, PageTableEntry
++, PhysicalAddress4K
+= SIZE_4KB
) {
191 // Fill in the Page Table entries
193 PageTableEntry
->Uint64
= (UINT64
) PhysicalAddress4K
| AddressEncMask
;
194 PageTableEntry
->Bits
.ReadWrite
= 1;
196 if ((IsNullDetectionEnabled () && PhysicalAddress4K
== 0) ||
197 (PcdGetBool (PcdCpuStackGuard
) && PhysicalAddress4K
== StackBase
)) {
198 PageTableEntry
->Bits
.Present
= 0;
200 PageTableEntry
->Bits
.Present
= 1;
203 if (PcdGetBool (PcdSetNxForStack
)
204 && (PhysicalAddress4K
>= StackBase
)
205 && (PhysicalAddress4K
< StackBase
+ StackSize
)) {
207 // Set Nx bit for stack.
209 PageTableEntry
->Bits
.Nx
= 1;
217 @param[in] PhysicalAddress Start physical address the 1G page covered.
218 @param[in, out] PageEntry1G Pointer to 1G page entry.
219 @param[in] StackBase Stack base address.
220 @param[in] StackSize Stack size.
225 IN EFI_PHYSICAL_ADDRESS PhysicalAddress
,
226 IN OUT UINT64
*PageEntry1G
,
227 IN EFI_PHYSICAL_ADDRESS StackBase
,
231 EFI_PHYSICAL_ADDRESS PhysicalAddress2M
;
232 UINTN IndexOfPageDirectoryEntries
;
233 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
234 UINT64 AddressEncMask
;
237 // Make sure AddressEncMask is contained to smallest supported address field
239 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
241 PageDirectoryEntry
= AllocatePages (1);
242 ASSERT (PageDirectoryEntry
!= NULL
);
245 // Fill in 1G page entry.
247 *PageEntry1G
= (UINT64
) (UINTN
) PageDirectoryEntry
| AddressEncMask
| IA32_PG_P
| IA32_PG_RW
;
249 PhysicalAddress2M
= PhysicalAddress
;
250 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PhysicalAddress2M
+= SIZE_2MB
) {
251 if (ToSplitPageTable (PhysicalAddress2M
, SIZE_2MB
, StackBase
, StackSize
)) {
253 // Need to split this 2M page that covers NULL or stack range.
255 Split2MPageTo4K (PhysicalAddress2M
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
258 // Fill in the Page Directory entries
260 PageDirectoryEntry
->Uint64
= (UINT64
) PhysicalAddress2M
| AddressEncMask
;
261 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
262 PageDirectoryEntry
->Bits
.Present
= 1;
263 PageDirectoryEntry
->Bits
.MustBe1
= 1;
269 Allocates and fills in the Page Directory and Page Table Entries to
270 establish a 1:1 Virtual to Physical mapping.
272 @param[in] StackBase Stack base address.
273 @param[in] StackSize Stack size.
275 @return The address of 4 level page map.
279 CreateIdentityMappingPageTables (
280 IN EFI_PHYSICAL_ADDRESS StackBase
,
286 UINT8 PhysicalAddressBits
;
287 EFI_PHYSICAL_ADDRESS PageAddress
;
288 UINTN IndexOfPml4Entries
;
289 UINTN IndexOfPdpEntries
;
290 UINTN IndexOfPageDirectoryEntries
;
291 UINT32 NumberOfPml4EntriesNeeded
;
292 UINT32 NumberOfPdpEntriesNeeded
;
293 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMapLevel4Entry
;
294 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMap
;
295 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
296 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
298 UINTN BigPageAddress
;
300 BOOLEAN Page1GSupport
;
301 PAGE_TABLE_1G_ENTRY
*PageDirectory1GEntry
;
302 UINT64 AddressEncMask
;
305 // Make sure AddressEncMask is contained to smallest supported address field
307 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
309 Page1GSupport
= FALSE
;
310 if (PcdGetBool(PcdUse1GPageTable
)) {
311 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
312 if (RegEax
>= 0x80000001) {
313 AsmCpuid (0x80000001, NULL
, NULL
, NULL
, &RegEdx
);
314 if ((RegEdx
& BIT26
) != 0) {
315 Page1GSupport
= TRUE
;
321 // Get physical address bits supported.
323 Hob
= GetFirstHob (EFI_HOB_TYPE_CPU
);
325 PhysicalAddressBits
= ((EFI_HOB_CPU
*) Hob
)->SizeOfMemorySpace
;
327 AsmCpuid (0x80000000, &RegEax
, NULL
, NULL
, NULL
);
328 if (RegEax
>= 0x80000008) {
329 AsmCpuid (0x80000008, &RegEax
, NULL
, NULL
, NULL
);
330 PhysicalAddressBits
= (UINT8
) RegEax
;
332 PhysicalAddressBits
= 36;
337 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
339 ASSERT (PhysicalAddressBits
<= 52);
340 if (PhysicalAddressBits
> 48) {
341 PhysicalAddressBits
= 48;
345 // Calculate the table entries needed.
347 if (PhysicalAddressBits
<= 39 ) {
348 NumberOfPml4EntriesNeeded
= 1;
349 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 30));
351 NumberOfPml4EntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 39));
352 NumberOfPdpEntriesNeeded
= 512;
356 // Pre-allocate big pages to avoid later allocations.
358 if (!Page1GSupport
) {
359 TotalPagesNum
= (NumberOfPdpEntriesNeeded
+ 1) * NumberOfPml4EntriesNeeded
+ 1;
361 TotalPagesNum
= NumberOfPml4EntriesNeeded
+ 1;
363 BigPageAddress
= (UINTN
) AllocatePages (TotalPagesNum
);
364 ASSERT (BigPageAddress
!= 0);
367 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
369 PageMap
= (VOID
*) BigPageAddress
;
370 BigPageAddress
+= SIZE_4KB
;
372 PageMapLevel4Entry
= PageMap
;
374 for (IndexOfPml4Entries
= 0; IndexOfPml4Entries
< NumberOfPml4EntriesNeeded
; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
376 // Each PML4 entry points to a page of Page Directory Pointer entires.
377 // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
379 PageDirectoryPointerEntry
= (VOID
*) BigPageAddress
;
380 BigPageAddress
+= SIZE_4KB
;
385 PageMapLevel4Entry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryPointerEntry
| AddressEncMask
;
386 PageMapLevel4Entry
->Bits
.ReadWrite
= 1;
387 PageMapLevel4Entry
->Bits
.Present
= 1;
390 PageDirectory1GEntry
= (VOID
*) PageDirectoryPointerEntry
;
392 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectory1GEntry
++, PageAddress
+= SIZE_1GB
) {
393 if (ToSplitPageTable (PageAddress
, SIZE_1GB
, StackBase
, StackSize
)) {
394 Split1GPageTo2M (PageAddress
, (UINT64
*) PageDirectory1GEntry
, StackBase
, StackSize
);
397 // Fill in the Page Directory entries
399 PageDirectory1GEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
400 PageDirectory1GEntry
->Bits
.ReadWrite
= 1;
401 PageDirectory1GEntry
->Bits
.Present
= 1;
402 PageDirectory1GEntry
->Bits
.MustBe1
= 1;
406 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
408 // Each Directory Pointer entries points to a page of Page Directory entires.
409 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
411 PageDirectoryEntry
= (VOID
*) BigPageAddress
;
412 BigPageAddress
+= SIZE_4KB
;
415 // Fill in a Page Directory Pointer Entries
417 PageDirectoryPointerEntry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryEntry
| AddressEncMask
;
418 PageDirectoryPointerEntry
->Bits
.ReadWrite
= 1;
419 PageDirectoryPointerEntry
->Bits
.Present
= 1;
421 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PageAddress
+= SIZE_2MB
) {
422 if (ToSplitPageTable (PageAddress
, SIZE_2MB
, StackBase
, StackSize
)) {
424 // Need to split this 2M page that covers NULL or stack range.
426 Split2MPageTo4K (PageAddress
, (UINT64
*) PageDirectoryEntry
, StackBase
, StackSize
);
429 // Fill in the Page Directory entries
431 PageDirectoryEntry
->Uint64
= (UINT64
)PageAddress
| AddressEncMask
;
432 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
433 PageDirectoryEntry
->Bits
.Present
= 1;
434 PageDirectoryEntry
->Bits
.MustBe1
= 1;
439 for (; IndexOfPdpEntries
< 512; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
441 PageDirectoryPointerEntry
,
442 sizeof(PAGE_MAP_AND_DIRECTORY_POINTER
)
449 // For the PML4 entries we are not using fill in a null entry.
451 for (; IndexOfPml4Entries
< 512; IndexOfPml4Entries
++, PageMapLevel4Entry
++) {
454 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER
)
458 if (PcdGetBool (PcdSetNxForStack
)) {
459 EnableExecuteDisableBit ();
462 return (UINTN
)PageMap
;