/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart         The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN  VOID *HobStart
  )
{
  EFI_PEI_HOB_POINTERS   RscHob;
  EFI_PEI_HOB_POINTERS   MemHob;
  BOOLEAN                DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear = FALSE;

  //
  // Check if page 0 exists and is free
  //
  while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                                   RscHob.Raw)) != NULL) {
    if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
        RscHob.ResourceDescriptor->PhysicalStart == 0) {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
                                       MemHob.Raw)) != NULL) {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE) {
          DoClear = FALSE;
          break;
        }
        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }
      break;
    }
    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

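/**
  Return the configured state of the NULL pointer detection feature.

  @retval TRUE   NULL pointer detection (BIT0 of
                 PcdNullPointerDetectionPropertyMask) is enabled.
  @retval FALSE  NULL pointer detection is disabled.

**/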
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64 MsrRegisters;

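  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) bit,
  // which must be set for the NX/XD bit in page table entries to take effect.
  //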
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address the 2M page covered.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePages (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;

    if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {
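      //
      // Leave page 0 not present so that NULL pointer accesses fault.
      //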
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (PcdGetBool (PcdSetNxForStack)
        && (PhysicalAddress4K >= StackBase)
        && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address the 1G page covered.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePages (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)
       || (PcdGetBool (PcdSetNxForStack)
          && (PhysicalAddress2M < StackBase + StackSize)
          && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {
      //
      // Need to split this 2M page that covers NULL or stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
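  //
  // Use 1GB pages only if the PCD allows it and CPUID leaf 80000001h
  // EDX bit 26 reports 1GB page support.
  //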
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
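  // Since this is an identity (1:1) mapping, the usable physical range is
  // limited by the 48-bit linear address width, so cap the value at 48.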
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
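  // Each PML4 entry maps 512 GB (2^39 bytes) of address space and each PDP
  // entry maps 1 GB (2^30 bytes), hence the shift amounts used below.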
  //
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
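  // Without 1GB pages this needs one PML4 page, one PDPT page per PML4 entry,
  // and one page directory page per PDP entry; with 1GB pages the page
  // directories are not needed.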
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if ((IsNullDetectionEnabled () && PageAddress == 0)
           || (PcdGetBool (PcdSetNxForStack)
              && (PageAddress < StackBase + StackSize)
              && ((PageAddress + SIZE_1GB) > StackBase))) {
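          //
          // Need to split this 1G page that covers NULL or stack range.
          //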
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entry
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if ((IsNullDetectionEnabled () && PageAddress == 0)
             || (PcdGetBool (PcdSetNxForStack)
                && (PageAddress < StackBase + StackSize)
                && ((PageAddress + SIZE_2MB) > StackBase))) {
            //
            // Need to split this 2M page that covers NULL or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

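      //
      // Fill the remaining, unused PDPT entries with null entries.
      //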
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}

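//
// Illustrative usage sketch (an assumption about the caller, not code from
// this file): the DxeIpl hand-off path is expected to load the returned page
// table into CR3 before transferring control to DxeCore, roughly:
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, StackSize);
//   AsmWriteCr3 (PageTables);
//
// BaseOfStack and StackSize are placeholder names for the caller's stack
// region.
//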