]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
MdeModulePkg/DxeIplPeim: Refine coding style in function comments
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
CommitLineData
f3b33289 1/** @file\r
2 x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
4 enter Long Mode (x64 64-bit mode).\r
5\r
6 While we make a 1:1 mapping (identity mapping) for all physical pages \r
4140a663 7 we still need to use the MTRR's to ensure that the cachability attributes\r
f3b33289 8 for all memory regions is correct.\r
9\r
10 The basic idea is to use 2MB page table entries where ever possible. If\r
11 more granularity of cachability is required then 4K page tables are used.\r
12\r
13 References:\r
4140a663 14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel\r
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
f3b33289 17\r
36829e67 18Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
5997daf7
LD
19Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
20\r
cd5ebaa0 21This program and the accompanying materials\r
f3b33289 22are licensed and made available under the terms and conditions of the BSD License\r
23which accompanies this distribution. The full text of the license may be found at\r
24http://opensource.org/licenses/bsd-license.php\r
25\r
26THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
27WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
28\r
29**/ \r
30\r
31#include "DxeIpl.h"\r
32#include "VirtualMemory.h"\r
33\r
9189ec20 34/**\r
382aeac2 35 Clear legacy memory located at the first 4K-page, if available.\r
9189ec20 36\r
382aeac2
DB
37 This function traverses the whole HOB list to check if memory from 0 to 4095\r
38 exists and has not been allocated, and then clear it if so.\r
9189ec20 39\r
382aeac2 40 @param HobStart The start of HobList passed to DxeCore.\r
9189ec20
JW
41\r
42**/\r
43VOID\r
44ClearFirst4KPage (\r
45 IN VOID *HobStart\r
46 )\r
47{\r
48 EFI_PEI_HOB_POINTERS RscHob;\r
49 EFI_PEI_HOB_POINTERS MemHob;\r
50 BOOLEAN DoClear;\r
51\r
52 RscHob.Raw = HobStart;\r
53 MemHob.Raw = HobStart;\r
54 DoClear = FALSE;\r
55\r
56 //\r
57 // Check if page 0 exists and free\r
58 //\r
59 while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r
60 RscHob.Raw)) != NULL) {\r
61 if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&\r
62 RscHob.ResourceDescriptor->PhysicalStart == 0) {\r
63 DoClear = TRUE;\r
64 //\r
65 // Make sure memory at 0-4095 has not been allocated.\r
66 //\r
67 while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,\r
68 MemHob.Raw)) != NULL) {\r
69 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r
70 < EFI_PAGE_SIZE) {\r
71 DoClear = FALSE;\r
72 break;\r
73 }\r
74 MemHob.Raw = GET_NEXT_HOB (MemHob);\r
75 }\r
76 break;\r
77 }\r
78 RscHob.Raw = GET_NEXT_HOB (RscHob);\r
79 }\r
80\r
81 if (DoClear) {\r
82 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r
83 SetMem (NULL, EFI_PAGE_SIZE, 0);\r
84 }\r
85\r
86 return;\r
87}\r
88\r
382aeac2
DB
89/**\r
90 Return configure status of NULL pointer detection feature.\r
91\r
92 @return TRUE NULL pointer detection feature is enabled\r
93 @return FALSE NULL pointer detection feature is disabled\r
94\r
95**/\r
9189ec20
JW
96BOOLEAN\r
97IsNullDetectionEnabled (\r
98 VOID\r
99 )\r
100{\r
101 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);\r
102}\r
5997daf7 103\r
5630cdfe
SZ
104/**\r
105 Enable Execute Disable Bit.\r
106\r
107**/\r
108VOID\r
109EnableExecuteDisableBit (\r
110 VOID\r
111 )\r
112{\r
113 UINT64 MsrRegisters;\r
114\r
115 MsrRegisters = AsmReadMsr64 (0xC0000080);\r
116 MsrRegisters |= BIT11;\r
117 AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
118}\r
119\r
120/**\r
121 Split 2M page to 4K.\r
122\r
123 @param[in] PhysicalAddress Start physical address the 2M page covered.\r
124 @param[in, out] PageEntry2M Pointer to 2M page entry.\r
125 @param[in] StackBase Stack base address.\r
126 @param[in] StackSize Stack size.\r
127\r
128**/\r
129VOID\r
130Split2MPageTo4K (\r
131 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
132 IN OUT UINT64 *PageEntry2M,\r
133 IN EFI_PHYSICAL_ADDRESS StackBase,\r
134 IN UINTN StackSize\r
135 )\r
136{\r
137 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
138 UINTN IndexOfPageTableEntries;\r
139 PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
5997daf7
LD
140 UINT64 AddressEncMask;\r
141\r
142 //\r
143 // Make sure AddressEncMask is contained to smallest supported address field\r
144 //\r
145 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
146\r
147 PageTableEntry = AllocatePages (1);\r
36829e67 148 ASSERT (PageTableEntry != NULL);\r
5997daf7 149\r
5630cdfe
SZ
150 //\r
151 // Fill in 2M page entry.\r
152 //\r
5997daf7 153 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
154\r
155 PhysicalAddress4K = PhysicalAddress;\r
156 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
157 //\r
158 // Fill in the Page Table entries\r
159 //\r
5997daf7 160 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
5630cdfe 161 PageTableEntry->Bits.ReadWrite = 1;\r
9189ec20
JW
162\r
163 if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {\r
164 PageTableEntry->Bits.Present = 0;\r
165 } else {\r
166 PageTableEntry->Bits.Present = 1;\r
167 }\r
168\r
169 if (PcdGetBool (PcdSetNxForStack)\r
170 && (PhysicalAddress4K >= StackBase)\r
171 && (PhysicalAddress4K < StackBase + StackSize)) {\r
5630cdfe
SZ
172 //\r
173 // Set Nx bit for stack.\r
174 //\r
175 PageTableEntry->Bits.Nx = 1;\r
176 }\r
177 }\r
178}\r
179\r
180/**\r
181 Split 1G page to 2M.\r
182\r
183 @param[in] PhysicalAddress Start physical address the 1G page covered.\r
184 @param[in, out] PageEntry1G Pointer to 1G page entry.\r
185 @param[in] StackBase Stack base address.\r
186 @param[in] StackSize Stack size.\r
187\r
188**/\r
189VOID\r
190Split1GPageTo2M (\r
191 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
192 IN OUT UINT64 *PageEntry1G,\r
193 IN EFI_PHYSICAL_ADDRESS StackBase,\r
194 IN UINTN StackSize\r
195 )\r
196{\r
197 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
198 UINTN IndexOfPageDirectoryEntries;\r
199 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
5997daf7
LD
200 UINT64 AddressEncMask;\r
201\r
202 //\r
203 // Make sure AddressEncMask is contained to smallest supported address field\r
204 //\r
205 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
206\r
207 PageDirectoryEntry = AllocatePages (1);\r
36829e67 208 ASSERT (PageDirectoryEntry != NULL);\r
5997daf7 209\r
5630cdfe
SZ
210 //\r
211 // Fill in 1G page entry.\r
212 //\r
5997daf7 213 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
214\r
215 PhysicalAddress2M = PhysicalAddress;\r
216 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
9189ec20
JW
217 if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)\r
218 || (PcdGetBool (PcdSetNxForStack)\r
219 && (PhysicalAddress2M < StackBase + StackSize)\r
220 && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {\r
5630cdfe 221 //\r
9189ec20 222 // Need to split this 2M page that covers NULL or stack range.\r
5630cdfe
SZ
223 //\r
224 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
225 } else {\r
226 //\r
227 // Fill in the Page Directory entries\r
228 //\r
5997daf7 229 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
5630cdfe
SZ
230 PageDirectoryEntry->Bits.ReadWrite = 1;\r
231 PageDirectoryEntry->Bits.Present = 1;\r
232 PageDirectoryEntry->Bits.MustBe1 = 1;\r
233 }\r
234 }\r
235}\r
236\r
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  All page-table pages are pre-allocated in one contiguous pool and carved
  out sequentially via BigPageAddress, so no further allocations happen while
  the tables are being filled in.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Detect 1GB-page support: CPUID leaf 0x80000001, EDX bit 26 — only probed
  // when the platform PCD allows 1G page tables at all.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  // Prefer the CPU HOB produced by PEI; fall back to CPUID leaf 0x80000008
  // (EAX[7:0] = physical address width), and finally to a 36-bit default.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  // Each PML4 entry spans 512GB (2^39); each PDP entry spans 1GB (2^30).
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  // With 1G pages: one PML4 page plus one PDP page per PML4 entry.
  // Without: additionally one PD page per PDP entry.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entires.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // With 1G support, the PDP page is filled with 512 1GB leaf entries
      // instead of pointers to page directories.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if ((IsNullDetectionEnabled () && PageAddress == 0)
            || (PcdGetBool (PcdSetNxForStack)
                && (PageAddress < StackBase + StackSize)
                && ((PageAddress + SIZE_1GB) > StackBase))) {
          //
          // A 1G page covering NULL or the stack must be split to apply
          // the not-present / NX attributes at finer granularity.
          //
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if ((IsNullDetectionEnabled () && PageAddress == 0)
              || (PcdGetBool (PcdSetNxForStack)
                  && (PageAddress < StackBase + StackSize)
                  && ((PageAddress + SIZE_2MB) > StackBase))) {
            //
            // Need to split this 2M page that covers NULL or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero the remaining PDP slots; a zeroed entry has Present clear, so
      // any access through it faults.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // NX page-table bits only take effect once EFER.NXE is set.
  //
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
439\r