/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

  Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD License
  which accompanies this distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart  The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN VOID *HobStart
  )
{
  EFI_PEI_HOB_POINTERS    RscHob;
  EFI_PEI_HOB_POINTERS    MemHob;
  BOOLEAN                 DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear = FALSE;

  //
  // Check if page 0 exists and is free
  //
  while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                                   RscHob.Raw)) != NULL) {
    if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
        RscHob.ResourceDescriptor->PhysicalStart == 0) {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
                                       MemHob.Raw)) != NULL) {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE) {
          DoClear = FALSE;
          break;
        }
        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }
      break;
    }
    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

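/**
  Check if the NULL pointer detection feature is enabled, i.e. whether BIT0 of
  PcdNullPointerDetectionPropertyMask is set.

  @retval TRUE   NULL pointer detection is enabled.
  @retval FALSE  NULL pointer detection is disabled.

**/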
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64 MsrRegisters;

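  //
  // Set the NXE bit (BIT11) in the IA32_EFER MSR (0xC0000080) to enable
  // page-level execute protection via the page-table Nx bits.
  //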
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress  Start physical address the 2M page covers.
  @param[in, out] PageEntry2M      Pointer to 2M page entry.
  @param[in]      StackBase        Stack base address.
  @param[in]      StackSize        Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
  IN OUT UINT64           *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS StackBase,
  IN UINTN                StackSize
  )
{
  EFI_PHYSICAL_ADDRESS    PhysicalAddress4K;
  UINTN                   IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY     *PageTableEntry;
  UINT64                  AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePages (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;

    if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (PcdGetBool (PcdSetNxForStack)
        && (PhysicalAddress4K >= StackBase)
        && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress  Start physical address the 1G page covers.
  @param[in, out] PageEntry1G      Pointer to 1G page entry.
  @param[in]      StackBase        Stack base address.
  @param[in]      StackSize        Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
  IN OUT UINT64           *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS StackBase,
  IN UINTN                StackSize
  )
{
  EFI_PHYSICAL_ADDRESS    PhysicalAddress2M;
  UINTN                   IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY        *PageDirectoryEntry;
  UINT64                  AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePages (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)
        || (PcdGetBool (PcdSetNxForStack)
            && (PhysicalAddress2M < StackBase + StackSize)
            && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {
      //
      // Need to split this 2M page that covers the NULL page or stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS StackBase,
  IN UINTN                StackSize
  )
{
  UINT32                          RegEax;
  UINT32                          RegEdx;
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PageAddress;
  UINTN                           IndexOfPml4Entries;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPml4EntriesNeeded;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           BigPageAddress;
  VOID                            *Hob;
  BOOLEAN                         Page1GSupport;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  UINT64                          AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
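  //
  // CPUID.80000001H:EDX[26] reports whether 1-GByte pages are supported.
  //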
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      //
      // CPUID.80000008H:EAX[7:0] reports the number of physical address bits.
      //
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
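  //
  // Worked example: with PhysicalAddressBits == 36 and no 1-GByte page
  // support, NumberOfPml4EntriesNeeded == 1 and NumberOfPdpEntriesNeeded ==
  // 1 << (36 - 30) == 64, so TotalPagesNum == (64 + 1) * 1 + 1 == 66 pages:
  // one PML4, one PDPT, and 64 page directories.
  //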
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64) (UINTN) PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if ((IsNullDetectionEnabled () && PageAddress == 0)
            || (PcdGetBool (PcdSetNxForStack)
                && (PageAddress < StackBase + StackSize)
                && ((PageAddress + SIZE_1GB) > StackBase))) {
          //
          // Need to split this 1G page that covers the NULL page or stack range.
          //
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64) PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entry
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if ((IsNullDetectionEnabled () && PageAddress == 0)
              || (PcdGetBool (PcdSetNxForStack)
                  && (PageAddress < StackBase + StackSize)
                  && ((PageAddress + SIZE_2MB) > StackBase))) {
            //
            // Need to split this 2M page that covers the NULL page or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64) PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Fill in null entries for unused Page Directory Pointer entries.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN) PageMap;
}

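//
// Usage sketch (illustrative only, not part of this file): the returned
// address is installed as the new CR3 during DxeIpl's hand-off to DXE core.
// A minimal sketch, assuming BaseLib's AsmWriteCr3 and hypothetical
// BaseOfStack/STACK_SIZE values supplied by the caller:
//
//   UINTN  PageTables;
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);
//   AsmWriteCr3 (PageTables);
//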