/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability
  attributes for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required, then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

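//
// For reference: the IA-32e paging structures built in this file form a
// four-level hierarchy in which every table holds 512 8-byte entries, so a
// single entry covers 512 GB at the PML4 level, 1 GB at the Page Directory
// Pointer level, 2 MB at the Page Directory level, and 4 KB at the Page
// Table level. The code below maps memory with 2 MB (or 1 GB) entries and
// only drops to 4 KB entries where the DXE stack needs the Nx attribute.
//
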
/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64           MsrRegisters;

  //
  // Set NXE (BIT11) in the IA32_EFER MSR (0xC0000080) so that the Nx bit
  // in page table entries takes effect.
  //
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address covered by the 2M page.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;

  PageTableEntry = AllocatePages (1);
  ASSERT (PageTableEntry != NULL);
  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present = 1;
    if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}
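
//
// Illustrative example (hypothetical values): calling
// Split2MPageTo4K (0x3FE00000, &Pde, 0x3FE00000, SIZE_128KB) replaces the
// 2 MB mapping at 0x3FE00000 with a 512-entry 4 KB page table and sets the
// Nx bit on the 32 entries covering [0x3FE00000, 0x3FE20000).
//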

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address covered by the 1G page.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;

  PageDirectoryEntry = AllocatePages (1);
  ASSERT (PageDirectoryEntry != NULL);
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}
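
//
// Note: the stack overlap check above (and the equivalent checks below) is
// the usual half-open interval test: a large page starting at P with size S
// overlaps [StackBase, StackBase + StackSize) exactly when
// P < StackBase + StackSize and P + S > StackBase.
//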

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;

  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }
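
  //
  // Note: Page1GSupport reflects CPUID.80000001h:EDX.Page1GB[bit 26], which
  // is why BIT26 is tested above; PcdUse1GPageTable additionally gates
  // whether 1 GB pages are used at all.
  //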

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
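
  //
  // Note: CPUID leaf 80000008h reports the physical address width in
  // EAX[7:0]; 36 bits is a conservative fallback for processors that do not
  // implement that leaf.
  //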

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
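
  //
  // Worked example (illustrative): with PhysicalAddressBits = 36 this yields
  // NumberOfPml4EntriesNeeded = 1 and NumberOfPdpEntriesNeeded = 2^(36 - 30)
  // = 64; at the 48-bit maximum it yields 2^(48 - 39) = 512 PML4 entries,
  // each backed by a full 512-entry PDPT.
  //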

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
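
  //
  // Continuing the 36-bit example without 1 GB page support:
  // TotalPagesNum = (64 + 1) * 1 + 1 = 66, i.e. one PML4 page, one PDPT
  // page, and 64 Page Directory pages.
  //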
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_1GB) > StackBase)) {
          //
          // Need to split this 1G page that covers the stack range.
          //
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entry
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_2MB) > StackBase)) {
            //
            // Need to split this 2M page that covers the stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Fill in null entries for the unused Page Directory Pointer entries.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
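
//
// Usage note (illustrative, not shown in this file): the address returned by
// CreateIdentityMappingPageTables () is intended to be loaded into CR3 by
// DxeIpl's hand-off code before the switch to long mode, for example with
// BaseLib's AsmWriteCr3 (); this file itself only builds the tables.
//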