]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
MdeModulePkg/DxeIplPeim: Refine coding style in function comments
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
1 /** @file
2 x64 Virtual Memory Management Services in the form of an IA-32 driver.
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to
4 enter Long Mode (x64 64-bit mode).
5
6 While we make a 1:1 mapping (identity mapping) for all physical pages
7 we still need to use the MTRR's to ensure that the cachability attributes
8 for all memory regions is correct.
9
10 The basic idea is to use 2MB page table entries where ever possible. If
11 more granularity of cachability is required then 4K page tables are used.
12
13 References:
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel
17
18 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
19 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
20
21 This program and the accompanying materials
22 are licensed and made available under the terms and conditions of the BSD License
23 which accompanies this distribution. The full text of the license may be found at
24 http://opensource.org/licenses/bsd-license.php
25
26 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
27 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
28
29 **/
30
31 #include "DxeIpl.h"
32 #include "VirtualMemory.h"
33
34 /**
35 Clear legacy memory located at the first 4K-page, if available.
36
37 This function traverses the whole HOB list to check if memory from 0 to 4095
38 exists and has not been allocated, and then clear it if so.
39
40 @param HobStart The start of HobList passed to DxeCore.
41
42 **/
43 VOID
44 ClearFirst4KPage (
45 IN VOID *HobStart
46 )
47 {
48 EFI_PEI_HOB_POINTERS RscHob;
49 EFI_PEI_HOB_POINTERS MemHob;
50 BOOLEAN DoClear;
51
52 RscHob.Raw = HobStart;
53 MemHob.Raw = HobStart;
54 DoClear = FALSE;
55
56 //
57 // Check if page 0 exists and free
58 //
59 while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
60 RscHob.Raw)) != NULL) {
61 if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
62 RscHob.ResourceDescriptor->PhysicalStart == 0) {
63 DoClear = TRUE;
64 //
65 // Make sure memory at 0-4095 has not been allocated.
66 //
67 while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
68 MemHob.Raw)) != NULL) {
69 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
70 < EFI_PAGE_SIZE) {
71 DoClear = FALSE;
72 break;
73 }
74 MemHob.Raw = GET_NEXT_HOB (MemHob);
75 }
76 break;
77 }
78 RscHob.Raw = GET_NEXT_HOB (RscHob);
79 }
80
81 if (DoClear) {
82 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
83 SetMem (NULL, EFI_PAGE_SIZE, 0);
84 }
85
86 return;
87 }
88
89 /**
90 Return configure status of NULL pointer detection feature.
91
92 @return TRUE NULL pointer detection feature is enabled
93 @return FALSE NULL pointer detection feature is disabled
94
95 **/
96 BOOLEAN
97 IsNullDetectionEnabled (
98 VOID
99 )
100 {
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
102 }
103
104 /**
105 Enable Execute Disable Bit.
106
107 **/
108 VOID
109 EnableExecuteDisableBit (
110 VOID
111 )
112 {
113 UINT64 MsrRegisters;
114
115 MsrRegisters = AsmReadMsr64 (0xC0000080);
116 MsrRegisters |= BIT11;
117 AsmWriteMsr64 (0xC0000080, MsrRegisters);
118 }
119
120 /**
121 Split 2M page to 4K.
122
123 @param[in] PhysicalAddress Start physical address the 2M page covered.
124 @param[in, out] PageEntry2M Pointer to 2M page entry.
125 @param[in] StackBase Stack base address.
126 @param[in] StackSize Stack size.
127
128 **/
129 VOID
130 Split2MPageTo4K (
131 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
132 IN OUT UINT64 *PageEntry2M,
133 IN EFI_PHYSICAL_ADDRESS StackBase,
134 IN UINTN StackSize
135 )
136 {
137 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
138 UINTN IndexOfPageTableEntries;
139 PAGE_TABLE_4K_ENTRY *PageTableEntry;
140 UINT64 AddressEncMask;
141
142 //
143 // Make sure AddressEncMask is contained to smallest supported address field
144 //
145 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
146
147 PageTableEntry = AllocatePages (1);
148 ASSERT (PageTableEntry != NULL);
149
150 //
151 // Fill in 2M page entry.
152 //
153 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
154
155 PhysicalAddress4K = PhysicalAddress;
156 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
157 //
158 // Fill in the Page Table entries
159 //
160 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
161 PageTableEntry->Bits.ReadWrite = 1;
162
163 if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {
164 PageTableEntry->Bits.Present = 0;
165 } else {
166 PageTableEntry->Bits.Present = 1;
167 }
168
169 if (PcdGetBool (PcdSetNxForStack)
170 && (PhysicalAddress4K >= StackBase)
171 && (PhysicalAddress4K < StackBase + StackSize)) {
172 //
173 // Set Nx bit for stack.
174 //
175 PageTableEntry->Bits.Nx = 1;
176 }
177 }
178 }
179
180 /**
181 Split 1G page to 2M.
182
183 @param[in] PhysicalAddress Start physical address the 1G page covered.
184 @param[in, out] PageEntry1G Pointer to 1G page entry.
185 @param[in] StackBase Stack base address.
186 @param[in] StackSize Stack size.
187
188 **/
189 VOID
190 Split1GPageTo2M (
191 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
192 IN OUT UINT64 *PageEntry1G,
193 IN EFI_PHYSICAL_ADDRESS StackBase,
194 IN UINTN StackSize
195 )
196 {
197 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;
198 UINTN IndexOfPageDirectoryEntries;
199 PAGE_TABLE_ENTRY *PageDirectoryEntry;
200 UINT64 AddressEncMask;
201
202 //
203 // Make sure AddressEncMask is contained to smallest supported address field
204 //
205 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
206
207 PageDirectoryEntry = AllocatePages (1);
208 ASSERT (PageDirectoryEntry != NULL);
209
210 //
211 // Fill in 1G page entry.
212 //
213 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
214
215 PhysicalAddress2M = PhysicalAddress;
216 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
217 if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)
218 || (PcdGetBool (PcdSetNxForStack)
219 && (PhysicalAddress2M < StackBase + StackSize)
220 && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {
221 //
222 // Need to split this 2M page that covers NULL or stack range.
223 //
224 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
225 } else {
226 //
227 // Fill in the Page Directory entries
228 //
229 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
230 PageDirectoryEntry->Bits.ReadWrite = 1;
231 PageDirectoryEntry->Bits.Present = 1;
232 PageDirectoryEntry->Bits.MustBe1 = 1;
233 }
234 }
235 }
236
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  All page-table pages are carved out of one up-front AllocatePages call
  (bump-allocated via BigPageAddress), so no further allocation happens
  while the tables are being filled, except inside the Split* helpers.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Detect 1GB-page support via CPUID leaf 0x80000001, EDX bit 26
  // (Intel SDM "Page1GB" feature flag), but only if the platform PCD
  // allows using 1G pages at all.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  // Prefer the CPU HOB published by PEI; otherwise query CPUID leaf
  // 0x80000008 (EAX[7:0] = physical-address width), defaulting to 36
  // bits when that leaf is unavailable.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  // 4-level paging cannot map more than 48 bits of linear address, so cap
  // the identity mapping there even if the CPU reports more physical bits.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  // Each PML4 entry spans 2^39 bytes (512 GB); each PDP entry spans
  // 2^30 bytes (1 GB).
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  // Without 1G pages: one PML4 page, plus per PML4 entry one PDPT page
  // and NumberOfPdpEntriesNeeded PD pages. With 1G pages: one PML4 page
  // plus one PDPT page per PML4 entry (no PD level needed).
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entires.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // With 1G pages the PDPT entries are leaf mappings themselves;
      // split any 1G range that covers page 0 (NULL detection) or
      // overlaps the stack (NX-for-stack) down to 2M/4K granularity.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if ((IsNullDetectionEnabled () && PageAddress == 0)
            || (PcdGetBool (PcdSetNxForStack)
                && (PageAddress < StackBase + StackSize)
                && ((PageAddress + SIZE_1GB) > StackBase))) {
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if ((IsNullDetectionEnabled () && PageAddress == 0)
              || (PcdGetBool (PcdSetNxForStack)
                  && (PageAddress < StackBase + StackSize)
                  && ((PageAddress + SIZE_2MB) > StackBase))) {
            //
            // Need to split this 2M page that covers NULL or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero-fill (mark not-present) the PDPT entries beyond the needed
      // count so no stale data is interpreted as a mapping.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Turn on EFER.NXE now that NX bits may have been set in the tables.
  //
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
439