/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  finer-grained cacheability control is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  VOID
  )
{
  UINT32                          RegEax;
  UINT32                          RegEdx;
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PageAddress;
  UINTN                           IndexOfPml4Entries;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPml4EntriesNeeded;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           BigPageAddress;
  VOID                            *Hob;
  BOOLEAN                         Page1GSupport;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;

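  //
  // Detect 1GB page support: CPUID leaf 80000001H reports 1-GByte page
  // capability in EDX bit 26 (Page1GB).
  //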
  Page1GSupport = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      Page1GSupport = TRUE;
    }
  }

  //
  // Get physical address bits supported.
  //
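  // The CPU HOB, when present, reports the address width in SizeOfMemorySpace.
  // Otherwise CPUID leaf 80000008H returns it in EAX bits 7:0; if that leaf is
  // not available, fall back to 36 bits.
  //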
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
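  // Each PML4 entry covers 512GB (2^39 bytes) and each page-directory-pointer
  // entry covers 1GB (2^30 bytes).  For example, a 36-bit physical address
  // space needs only 1 PML4 entry and 2^(36 - 30) = 64 PDP entries.
  //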
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  //
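  // Without 1GB pages: one page for the PML4 table, one PDPT page per PML4
  // entry, and one page directory page per PDP entry, i.e.
  // (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1 pages.
  // With 1GB pages, no page directories are needed, so only the PML4 page
  // plus one PDPT page per PML4 entry are allocated.
  //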
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

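      //
      // Setting MustBe1 (the PS flag) in each PDPT entry makes the entry map
      // a 1GB page directly.
      //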
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        //
        // Fill in the 1GB Page Directory Pointer entries
        //
        PageDirectory1GEntry->Uint64 = (UINT64)PageAddress;
        PageDirectory1GEntry->Bits.ReadWrite = 1;
        PageDirectory1GEntry->Bits.Present = 1;
        PageDirectory1GEntry->Bits.MustBe1 = 1;
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entry
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

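        //
        // Setting MustBe1 (the PS flag) in each page directory entry makes
        // the entry map a 2MB page directly instead of referencing a 4K
        // page table.
        //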
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          //
          // Fill in the Page Directory entries
          //
          PageDirectoryEntry->Uint64 = (UINT64)PageAddress;
          PageDirectoryEntry->Bits.ReadWrite = 1;
          PageDirectoryEntry->Bits.Present = 1;
          PageDirectoryEntry->Bits.MustBe1 = 1;
        }
      }

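      //
      // Zero out the remaining PDP entries beyond the supported physical
      // address range.
      //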
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

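  //
  // Under IA-32e paging, CR3 holds the physical address of this PML4 table;
  // the returned address is what the caller must load into CR3 to activate
  // the identity mapping.
  //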
  return (UINTN)PageMap;
}