]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
IntelSilicon: Correct function description for AllocateBuffer
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
CommitLineData
f3b33289 1/** @file\r
2 x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
4 enter Long Mode (x64 64-bit mode).\r
5\r
 6 While we make a 1:1 mapping (identity mapping) for all physical pages \r
4140a663 7 we still need to use the MTRRs to ensure that the cacheability attributes\r
f3b33289 8 for all memory regions are correct.\r
9\r
 10 The basic idea is to use 2MB page table entries wherever possible. If\r
 11 more granularity of cacheability is required then 4K page tables are used.\r
12\r
13 References:\r
4140a663 14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel\r
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
f3b33289 17\r
36829e67 18Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
5997daf7
LD
19Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
20\r
cd5ebaa0 21This program and the accompanying materials\r
f3b33289 22are licensed and made available under the terms and conditions of the BSD License\r
23which accompanies this distribution. The full text of the license may be found at\r
24http://opensource.org/licenses/bsd-license.php\r
25\r
26THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
27WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
28\r
29**/ \r
30\r
31#include "DxeIpl.h"\r
32#include "VirtualMemory.h"\r
33\r
9189ec20 34/**\r
382aeac2 35 Clear legacy memory located at the first 4K-page, if available.\r
9189ec20 36\r
382aeac2
DB
37 This function traverses the whole HOB list to check if memory from 0 to 4095\r
38 exists and has not been allocated, and then clear it if so.\r
9189ec20 39\r
382aeac2 40 @param HobStart The start of HobList passed to DxeCore.\r
9189ec20
JW
41\r
42**/\r
43VOID\r
44ClearFirst4KPage (\r
45 IN VOID *HobStart\r
46 )\r
47{\r
48 EFI_PEI_HOB_POINTERS RscHob;\r
49 EFI_PEI_HOB_POINTERS MemHob;\r
50 BOOLEAN DoClear;\r
51\r
52 RscHob.Raw = HobStart;\r
53 MemHob.Raw = HobStart;\r
54 DoClear = FALSE;\r
55\r
56 //\r
57 // Check if page 0 exists and free\r
58 //\r
59 while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
60 RscHob.Raw)) != NULL) {\r
61 if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&\r
62 RscHob.ResourceDescriptor->PhysicalStart == 0) {\r
63 DoClear = TRUE;\r
64 //\r
65 // Make sure memory at 0-4095 has not been allocated.\r
66 //\r
67 while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
68 MemHob.Raw)) != NULL) {\r
69 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r
70 < EFI_PAGE_SIZE) {\r
71 DoClear = FALSE;\r
72 break;\r
73 }\r
74 MemHob.Raw = GET_NEXT_HOB (MemHob);\r
75 }\r
76 break;\r
77 }\r
78 RscHob.Raw = GET_NEXT_HOB (RscHob);\r
79 }\r
80\r
81 if (DoClear) {\r
82 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r
83 SetMem (NULL, EFI_PAGE_SIZE, 0);\r
84 }\r
85\r
86 return;\r
87}\r
88\r
382aeac2
DB
89/**\r
90 Return configure status of NULL pointer detection feature.\r
91\r
92 @return TRUE NULL pointer detection feature is enabled\r
93 @return FALSE NULL pointer detection feature is disabled\r
94\r
95**/\r
9189ec20
JW
96BOOLEAN\r
97IsNullDetectionEnabled (\r
98 VOID\r
99 )\r
100{\r
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);\r
102}\r
5997daf7 103\r
5630cdfe
SZ
104/**\r
105 Enable Execute Disable Bit.\r
106\r
107**/\r
108VOID\r
109EnableExecuteDisableBit (\r
110 VOID\r
111 )\r
112{\r
113 UINT64 MsrRegisters;\r
114\r
115 MsrRegisters = AsmReadMsr64 (0xC0000080);\r
116 MsrRegisters |= BIT11;\r
117 AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
118}\r
119\r
50255363
JW
120/**\r
121 The function will check if page table entry should be splitted to smaller\r
122 granularity.\r
123\r
124 @retval TRUE Page table should be split.\r
125 @retval FALSE Page table should not be split.\r
126**/\r
127BOOLEAN\r
128ToSplitPageTable (\r
129 IN EFI_PHYSICAL_ADDRESS Address,\r
130 IN UINTN Size,\r
131 IN EFI_PHYSICAL_ADDRESS StackBase,\r
132 IN UINTN StackSize\r
133 )\r
134{\r
135 if (IsNullDetectionEnabled () && Address == 0) {\r
136 return TRUE;\r
137 }\r
138\r
139 if (PcdGetBool (PcdCpuStackGuard)) {\r
140 if (StackBase >= Address && StackBase < (Address + Size)) {\r
141 return TRUE;\r
142 }\r
143 }\r
144\r
145 if (PcdGetBool (PcdSetNxForStack)) {\r
146 if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {\r
147 return TRUE;\r
148 }\r
149 }\r
150\r
151 return FALSE;\r
152}\r
5630cdfe
SZ
153/**\r
154 Split 2M page to 4K.\r
155\r
156 @param[in] PhysicalAddress Start physical address the 2M page covered.\r
157 @param[in, out] PageEntry2M Pointer to 2M page entry.\r
158 @param[in] StackBase Stack base address.\r
159 @param[in] StackSize Stack size.\r
160\r
161**/\r
162VOID\r
163Split2MPageTo4K (\r
164 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
165 IN OUT UINT64 *PageEntry2M,\r
166 IN EFI_PHYSICAL_ADDRESS StackBase,\r
167 IN UINTN StackSize\r
168 )\r
169{\r
170 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
171 UINTN IndexOfPageTableEntries;\r
172 PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
5997daf7
LD
173 UINT64 AddressEncMask;\r
174\r
175 //\r
176 // Make sure AddressEncMask is contained to smallest supported address field\r
177 //\r
178 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
179\r
180 PageTableEntry = AllocatePages (1);\r
36829e67 181 ASSERT (PageTableEntry != NULL);\r
5997daf7 182\r
5630cdfe
SZ
183 //\r
184 // Fill in 2M page entry.\r
185 //\r
5997daf7 186 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
187\r
188 PhysicalAddress4K = PhysicalAddress;\r
189 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
190 //\r
191 // Fill in the Page Table entries\r
192 //\r
5997daf7 193 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
5630cdfe 194 PageTableEntry->Bits.ReadWrite = 1;\r
9189ec20 195\r
50255363
JW
196 if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||\r
197 (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {\r
9189ec20
JW
198 PageTableEntry->Bits.Present = 0;\r
199 } else {\r
200 PageTableEntry->Bits.Present = 1;\r
201 }\r
202\r
203 if (PcdGetBool (PcdSetNxForStack)\r
204 && (PhysicalAddress4K >= StackBase)\r
205 && (PhysicalAddress4K < StackBase + StackSize)) {\r
5630cdfe
SZ
206 //\r
207 // Set Nx bit for stack.\r
208 //\r
209 PageTableEntry->Bits.Nx = 1;\r
210 }\r
211 }\r
212}\r
213\r
214/**\r
215 Split 1G page to 2M.\r
216\r
217 @param[in] PhysicalAddress Start physical address the 1G page covered.\r
218 @param[in, out] PageEntry1G Pointer to 1G page entry.\r
219 @param[in] StackBase Stack base address.\r
220 @param[in] StackSize Stack size.\r
221\r
222**/\r
223VOID\r
224Split1GPageTo2M (\r
225 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
226 IN OUT UINT64 *PageEntry1G,\r
227 IN EFI_PHYSICAL_ADDRESS StackBase,\r
228 IN UINTN StackSize\r
229 )\r
230{\r
231 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
232 UINTN IndexOfPageDirectoryEntries;\r
233 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
5997daf7
LD
234 UINT64 AddressEncMask;\r
235\r
236 //\r
237 // Make sure AddressEncMask is contained to smallest supported address field\r
238 //\r
239 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
240\r
241 PageDirectoryEntry = AllocatePages (1);\r
36829e67 242 ASSERT (PageDirectoryEntry != NULL);\r
5997daf7 243\r
5630cdfe
SZ
244 //\r
245 // Fill in 1G page entry.\r
246 //\r
5997daf7 247 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
248\r
249 PhysicalAddress2M = PhysicalAddress;\r
250 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
50255363 251 if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {\r
5630cdfe 252 //\r
9189ec20 253 // Need to split this 2M page that covers NULL or stack range.\r
5630cdfe
SZ
254 //\r
255 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
256 } else {\r
257 //\r
258 // Fill in the Page Directory entries\r
259 //\r
5997daf7 260 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
5630cdfe
SZ
261 PageDirectoryEntry->Bits.ReadWrite = 1;\r
262 PageDirectoryEntry->Bits.Present = 1;\r
263 PageDirectoryEntry->Bits.MustBe1 = 1;\r
264 }\r
265 }\r
266}\r
267\r
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  All page-table pages are carved out of one up-front AllocatePages() call
  (BigPageAddress acts as a bump allocator over that region); only pages
  that must be split for NULL detection, stack guard, or stack NX fall back
  to the per-page allocations inside Split1GPageTo2M/Split2MPageTo4K.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Detect 1GB-page support (CPUID leaf 80000001h, EDX bit 26), but only
  // if the platform allows 1G pages via PcdUse1GPageTable.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  // Prefer the CPU HOB; otherwise query CPUID leaf 80000008h, defaulting
  // to 36 bits when that leaf is unavailable.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  // <= 39 bits fits in a single PML4 entry (one PDP page covers up to
  // 512 GB); otherwise one PDP page per PML4 entry, all 512 entries used.
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  // Without 1G pages: 1 PML4 page + per PML4 entry (1 PDP page +
  // NumberOfPdpEntriesNeeded PD pages). With 1G pages no PD pages are
  // needed, so only 1 PML4 page + one PDP page per PML4 entry.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // With 1G pages the PDP page holds 1G leaf entries directly; no page
      // directories are needed unless an entry must be split.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
          //
          // Need to split this 1G page that covers NULL or stack range.
          //
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entries.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
            //
            // Need to split this 2M page that covers NULL or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero out the unused PDP entries of the last PDP page so no stale
      // data is interpreted as a mapping.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Turn on EFER.NXE so the NX bits set above (for the stack) are honored.
  //
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
464\r