]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
7f6314451028cc763613f132857f754cbc9c7b92
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
1 /** @file
2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
5
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRRs to ensure that the cacheability attributes
8 for all memory regions are correct.
9
10 The basic idea is to use 2MB page table entries wherever possible. If
11 more granularity of cacheability is required then 4K page tables are used.
12
13 References:
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
17
18 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
20
21 This program and the accompanying materials
22 are licensed and made available under the terms and conditions of the BSD License
23 which accompanies this distribution. The full text of the license may be found at
24 http://opensource.org/licenses/bsd-license.php
25
26 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
27 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
28
29 **/
30
31 #include "DxeIpl.h"
32 #include "VirtualMemory.h"
33
34 /**
35 Clear legacy memory located at the first 4K-page, if available.
36
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clear it if so.
39
40 @param HobStart The start of HobList passed to DxeCore.
41
42 **/
43 VOID
44 ClearFirst4KPage (
45 IN VOID *HobStart
46 )
47 {
48 EFI_PEI_HOB_POINTERS RscHob;
49 EFI_PEI_HOB_POINTERS MemHob;
50 BOOLEAN DoClear;
51
52 RscHob.Raw = HobStart;
53 MemHob.Raw = HobStart;
54 DoClear = FALSE;
55
56 //
57 // Check if page 0 exists and free
58 //
59 while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
60 RscHob.Raw)) != NULL) {
61 if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
62 RscHob.ResourceDescriptor->PhysicalStart == 0) {
63 DoClear = TRUE;
64 //
65 // Make sure memory at 0-4095 has not been allocated.
66 //
67 while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
68 MemHob.Raw)) != NULL) {
69 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
70 < EFI_PAGE_SIZE) {
71 DoClear = FALSE;
72 break;
73 }
74 MemHob.Raw = GET_NEXT_HOB (MemHob);
75 }
76 break;
77 }
78 RscHob.Raw = GET_NEXT_HOB (RscHob);
79 }
80
81 if (DoClear) {
82 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
83 SetMem (NULL, EFI_PAGE_SIZE, 0);
84 }
85
86 return;
87 }
88
89 /**
90 Return configure status of NULL pointer detection feature.
91
92 @return TRUE NULL pointer detection feature is enabled
93 @return FALSE NULL pointer detection feature is disabled
94
95 **/
96 BOOLEAN
97 IsNullDetectionEnabled (
98 VOID
99 )
100 {
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
102 }
103
104 /**
105 Enable Execute Disable Bit.
106
107 **/
108 VOID
109 EnableExecuteDisableBit (
110 VOID
111 )
112 {
113 UINT64 MsrRegisters;
114
115 MsrRegisters = AsmReadMsr64 (0xC0000080);
116 MsrRegisters |= BIT11;
117 AsmWriteMsr64 (0xC0000080, MsrRegisters);
118 }
119
120 /**
121 The function will check if page table entry should be splitted to smaller
122 granularity.
123
124 @retval TRUE Page table should be split.
125 @retval FALSE Page table should not be split.
126 **/
127 BOOLEAN
128 ToSplitPageTable (
129 IN EFI_PHYSICAL_ADDRESS Address,
130 IN UINTN Size,
131 IN EFI_PHYSICAL_ADDRESS StackBase,
132 IN UINTN StackSize
133 )
134 {
135 if (IsNullDetectionEnabled () && Address == 0) {
136 return TRUE;
137 }
138
139 if (PcdGetBool (PcdCpuStackGuard)) {
140 if (StackBase >= Address && StackBase < (Address + Size)) {
141 return TRUE;
142 }
143 }
144
145 if (PcdGetBool (PcdSetNxForStack)) {
146 if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
147 return TRUE;
148 }
149 }
150
151 return FALSE;
152 }
153 /**
154 Split 2M page to 4K.
155
156 @param[in] PhysicalAddress Start physical address the 2M page covered.
157 @param[in, out] PageEntry2M Pointer to 2M page entry.
158 @param[in] StackBase Stack base address.
159 @param[in] StackSize Stack size.
160
161 **/
162 VOID
163 Split2MPageTo4K (
164 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
165 IN OUT UINT64 *PageEntry2M,
166 IN EFI_PHYSICAL_ADDRESS StackBase,
167 IN UINTN StackSize
168 )
169 {
170 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
171 UINTN IndexOfPageTableEntries;
172 PAGE_TABLE_4K_ENTRY *PageTableEntry;
173 UINT64 AddressEncMask;
174
175 //
176 // Make sure AddressEncMask is contained to smallest supported address field
177 //
178 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
179
180 PageTableEntry = AllocatePages (1);
181 ASSERT (PageTableEntry != NULL);
182
183 //
184 // Fill in 2M page entry.
185 //
186 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
187
188 PhysicalAddress4K = PhysicalAddress;
189 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
190 //
191 // Fill in the Page Table entries
192 //
193 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
194 PageTableEntry->Bits.ReadWrite = 1;
195
196 if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
197 (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
198 PageTableEntry->Bits.Present = 0;
199 } else {
200 PageTableEntry->Bits.Present = 1;
201 }
202
203 if (PcdGetBool (PcdSetNxForStack)
204 && (PhysicalAddress4K >= StackBase)
205 && (PhysicalAddress4K < StackBase + StackSize)) {
206 //
207 // Set Nx bit for stack.
208 //
209 PageTableEntry->Bits.Nx = 1;
210 }
211 }
212 }
213
214 /**
215 Split 1G page to 2M.
216
217 @param[in] PhysicalAddress Start physical address the 1G page covered.
218 @param[in, out] PageEntry1G Pointer to 1G page entry.
219 @param[in] StackBase Stack base address.
220 @param[in] StackSize Stack size.
221
222 **/
223 VOID
224 Split1GPageTo2M (
225 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
226 IN OUT UINT64 *PageEntry1G,
227 IN EFI_PHYSICAL_ADDRESS StackBase,
228 IN UINTN StackSize
229 )
230 {
231 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;
232 UINTN IndexOfPageDirectoryEntries;
233 PAGE_TABLE_ENTRY *PageDirectoryEntry;
234 UINT64 AddressEncMask;
235
236 //
237 // Make sure AddressEncMask is contained to smallest supported address field
238 //
239 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
240
241 PageDirectoryEntry = AllocatePages (1);
242 ASSERT (PageDirectoryEntry != NULL);
243
244 //
245 // Fill in 1G page entry.
246 //
247 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
248
249 PhysicalAddress2M = PhysicalAddress;
250 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
251 if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {
252 //
253 // Need to split this 2M page that covers NULL or stack range.
254 //
255 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
256 } else {
257 //
258 // Fill in the Page Directory entries
259 //
260 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
261 PageDirectoryEntry->Bits.ReadWrite = 1;
262 PageDirectoryEntry->Bits.Present = 1;
263 PageDirectoryEntry->Bits.MustBe1 = 1;
264 }
265 }
266 }
267
268 /**
269 Allocates and fills in the Page Directory and Page Table Entries to
270 establish a 1:1 Virtual to Physical mapping.
271
272 @param[in] StackBase Stack base address.
273 @param[in] StackSize Stack size.
274
275 @return The address of 4 level page map.
276
277 **/
278 UINTN
279 CreateIdentityMappingPageTables (
280 IN EFI_PHYSICAL_ADDRESS StackBase,
281 IN UINTN StackSize
282 )
283 {
284 UINT32 RegEax;
285 UINT32 RegEdx;
286 UINT8 PhysicalAddressBits;
287 EFI_PHYSICAL_ADDRESS PageAddress;
288 UINTN IndexOfPml4Entries;
289 UINTN IndexOfPdpEntries;
290 UINTN IndexOfPageDirectoryEntries;
291 UINT32 NumberOfPml4EntriesNeeded;
292 UINT32 NumberOfPdpEntriesNeeded;
293 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
294 PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
295 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
296 PAGE_TABLE_ENTRY *PageDirectoryEntry;
297 UINTN TotalPagesNum;
298 UINTN BigPageAddress;
299 VOID *Hob;
300 BOOLEAN Page1GSupport;
301 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
302 UINT64 AddressEncMask;
303
304 //
305 // Make sure AddressEncMask is contained to smallest supported address field
306 //
307 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
308
309 Page1GSupport = FALSE;
310 if (PcdGetBool(PcdUse1GPageTable)) {
311 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
312 if (RegEax >= 0x80000001) {
313 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
314 if ((RegEdx & BIT26) != 0) {
315 Page1GSupport = TRUE;
316 }
317 }
318 }
319
320 //
321 // Get physical address bits supported.
322 //
323 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
324 if (Hob != NULL) {
325 PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
326 } else {
327 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
328 if (RegEax >= 0x80000008) {
329 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
330 PhysicalAddressBits = (UINT8) RegEax;
331 } else {
332 PhysicalAddressBits = 36;
333 }
334 }
335
336 //
337 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
338 //
339 ASSERT (PhysicalAddressBits <= 52);
340 if (PhysicalAddressBits > 48) {
341 PhysicalAddressBits = 48;
342 }
343
344 //
345 // Calculate the table entries needed.
346 //
347 if (PhysicalAddressBits <= 39 ) {
348 NumberOfPml4EntriesNeeded = 1;
349 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
350 } else {
351 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
352 NumberOfPdpEntriesNeeded = 512;
353 }
354
355 //
356 // Pre-allocate big pages to avoid later allocations.
357 //
358 if (!Page1GSupport) {
359 TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
360 } else {
361 TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
362 }
363 BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
364 ASSERT (BigPageAddress != 0);
365
366 //
367 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
368 //
369 PageMap = (VOID *) BigPageAddress;
370 BigPageAddress += SIZE_4KB;
371
372 PageMapLevel4Entry = PageMap;
373 PageAddress = 0;
374 for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
375 //
376 // Each PML4 entry points to a page of Page Directory Pointer entires.
377 // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
378 //
379 PageDirectoryPointerEntry = (VOID *) BigPageAddress;
380 BigPageAddress += SIZE_4KB;
381
382 //
383 // Make a PML4 Entry
384 //
385 PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
386 PageMapLevel4Entry->Bits.ReadWrite = 1;
387 PageMapLevel4Entry->Bits.Present = 1;
388
389 if (Page1GSupport) {
390 PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;
391
392 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
393 if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
394 Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
395 } else {
396 //
397 // Fill in the Page Directory entries
398 //
399 PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
400 PageDirectory1GEntry->Bits.ReadWrite = 1;
401 PageDirectory1GEntry->Bits.Present = 1;
402 PageDirectory1GEntry->Bits.MustBe1 = 1;
403 }
404 }
405 } else {
406 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
407 //
408 // Each Directory Pointer entries points to a page of Page Directory entires.
409 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
410 //
411 PageDirectoryEntry = (VOID *) BigPageAddress;
412 BigPageAddress += SIZE_4KB;
413
414 //
415 // Fill in a Page Directory Pointer Entries
416 //
417 PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
418 PageDirectoryPointerEntry->Bits.ReadWrite = 1;
419 PageDirectoryPointerEntry->Bits.Present = 1;
420
421 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
422 if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
423 //
424 // Need to split this 2M page that covers NULL or stack range.
425 //
426 Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
427 } else {
428 //
429 // Fill in the Page Directory entries
430 //
431 PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
432 PageDirectoryEntry->Bits.ReadWrite = 1;
433 PageDirectoryEntry->Bits.Present = 1;
434 PageDirectoryEntry->Bits.MustBe1 = 1;
435 }
436 }
437 }
438
439 for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
440 ZeroMem (
441 PageDirectoryPointerEntry,
442 sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
443 );
444 }
445 }
446 }
447
448 //
449 // For the PML4 entries we are not using fill in a null entry.
450 //
451 for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
452 ZeroMem (
453 PageMapLevel4Entry,
454 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
455 );
456 }
457
458 if (PcdGetBool (PcdSetNxForStack)) {
459 EnableExecuteDisableBit ();
460 }
461
462 return (UINTN)PageMap;
463 }
464