]> git.proxmox.com Git - mirror_edk2.git/blame_incremental - MdeModulePkg/Core/DxeIplPeim/Ia32/DxeLoadFunc.c
MdeModulePkg/DxeIplPeim: Support GHCB pages when creating page tables
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / Ia32 / DxeLoadFunc.c
... / ...
CommitLineData
1/** @file\r
2 Ia32-specific functionality for DxeLoad.\r
3\r
4Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7SPDX-License-Identifier: BSD-2-Clause-Patent\r
8\r
9**/\r
10\r
11#include "DxeIpl.h"\r
12#include "VirtualMemory.h"\r
13\r
#define IDT_ENTRY_COUNT 32

//
// Layout of the 64-bit IDT together with the saved PEI services pointer.
// The EFI_PEI_SERVICES** is stored in the 8 bytes immediately preceding the
// IDT so it can still be located after the IDT register has been re-written
// for the long-mode hand-off (see HandOffToDxeCore).
//
typedef struct _X64_IDT_TABLE {
  //
  // Reserved 4 bytes preceding PeiService and IdtTable,
  // since IDT base address should be 8-byte alignment.
  //
  UINT32                   Reserved;
  CONST EFI_PEI_SERVICES   **PeiService;
  X64_IDT_GATE_DESCRIPTOR  IdtTable[IDT_ENTRY_COUNT];
} X64_IDT_TABLE;
25\r
//
// Global Descriptor Table (GDT)
//
// Loaded via AsmWriteGdtr () before the switch to long mode. The descriptor
// offsets must match the selector constants this module uses (e.g. the
// 0x38 entry looks like the long-mode code segment referenced as
// SYS_CODE64_SEL — verify against DxeIpl.h).
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries[] = {
/* selector { Global Segment Descriptor } */
/* 0x00 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //null descriptor
/* 0x08 */ {{0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear data segment descriptor
/* 0x10 */ {{0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear code segment descriptor
/* 0x18 */ {{0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system data segment descriptor
/* 0x20 */ {{0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system code segment descriptor
/* 0x28 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //spare segment descriptor
/* 0x30 */ {{0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system data segment descriptor
/* 0x38 */ {{0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 1, 0, 1, 0}}, //system code segment descriptor (differs from 0x20 in the L/DB bit fields — presumably the 64-bit code segment)
/* 0x40 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //spare segment descriptor
};
41\r
//
// IA32 Gdt register
//
// Pseudo-descriptor for AsmWriteGdtr (): Limit is the table size in bytes
// minus one, Base is the flat address of gGdtEntries.
//
GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR gGdt = {
  sizeof (gGdtEntries) - 1,
  (UINTN) gGdtEntries
  };
49\r
//
// Descriptor for the 64-bit IDT loaded just before entering long mode.
// Base is 0 here; HandOffToDxeCore fills it in at runtime once the IDT has
// been allocated and populated, then loads it with AsmWriteIdtr ().
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR gLidtDescriptor = {
  sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,
  0
};
54\r
55/**\r
56 Allocates and fills in the Page Directory and Page Table Entries to\r
57 establish a 4G page table.\r
58\r
59 @param[in] StackBase Stack base address.\r
60 @param[in] StackSize Stack size.\r
61\r
62 @return The address of page table.\r
63\r
64**/\r
65UINTN\r
66Create4GPageTablesIa32Pae (\r
67 IN EFI_PHYSICAL_ADDRESS StackBase,\r
68 IN UINTN StackSize\r
69 )\r
70{\r
71 UINT8 PhysicalAddressBits;\r
72 EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
73 UINTN IndexOfPdpEntries;\r
74 UINTN IndexOfPageDirectoryEntries;\r
75 UINT32 NumberOfPdpEntriesNeeded;\r
76 PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;\r
77 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
78 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
79 UINTN TotalPagesNum;\r
80 UINTN PageAddress;\r
81 UINT64 AddressEncMask;\r
82\r
83 //\r
84 // Make sure AddressEncMask is contained to smallest supported address field\r
85 //\r
86 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
87\r
88 PhysicalAddressBits = 32;\r
89\r
90 //\r
91 // Calculate the table entries needed.\r
92 //\r
93 NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));\r
94\r
95 TotalPagesNum = NumberOfPdpEntriesNeeded + 1;\r
96 PageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);\r
97 ASSERT (PageAddress != 0);\r
98\r
99 PageMap = (VOID *) PageAddress;\r
100 PageAddress += SIZE_4KB;\r
101\r
102 PageDirectoryPointerEntry = PageMap;\r
103 PhysicalAddress = 0;\r
104\r
105 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
106 //\r
107 // Each Directory Pointer entries points to a page of Page Directory entires.\r
108 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.\r
109 //\r
110 PageDirectoryEntry = (VOID *) PageAddress;\r
111 PageAddress += SIZE_4KB;\r
112\r
113 //\r
114 // Fill in a Page Directory Pointer Entries\r
115 //\r
116 PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;\r
117 PageDirectoryPointerEntry->Bits.Present = 1;\r
118\r
119 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {\r
120 if ((IsNullDetectionEnabled () && PhysicalAddress == 0)\r
121 || ((PhysicalAddress < StackBase + StackSize)\r
122 && ((PhysicalAddress + SIZE_2MB) > StackBase))) {\r
123 //\r
124 // Need to split this 2M page that covers stack range.\r
125 //\r
126 Split2MPageTo4K (PhysicalAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, 0, 0);\r
127 } else {\r
128 //\r
129 // Fill in the Page Directory entries\r
130 //\r
131 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress | AddressEncMask;\r
132 PageDirectoryEntry->Bits.ReadWrite = 1;\r
133 PageDirectoryEntry->Bits.Present = 1;\r
134 PageDirectoryEntry->Bits.MustBe1 = 1;\r
135 }\r
136 }\r
137 }\r
138\r
139 for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
140 ZeroMem (\r
141 PageDirectoryPointerEntry,\r
142 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)\r
143 );\r
144 }\r
145\r
146 //\r
147 // Protect the page table by marking the memory used for page table to be\r
148 // read-only.\r
149 //\r
150 EnablePageTableProtection ((UINTN)PageMap, FALSE);\r
151\r
152 return (UINTN) PageMap;\r
153}\r
154\r
155/**\r
156 The function will check if IA32 PAE is supported.\r
157\r
158 @retval TRUE IA32 PAE is supported.\r
159 @retval FALSE IA32 PAE is not supported.\r
160\r
161**/\r
162BOOLEAN\r
163IsIa32PaeSupport (\r
164 VOID\r
165 )\r
166{\r
167 UINT32 RegEax;\r
168 UINT32 RegEdx;\r
169 BOOLEAN Ia32PaeSupport;\r
170\r
171 Ia32PaeSupport = FALSE;\r
172 AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);\r
173 if (RegEax >= 0x1) {\r
174 AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);\r
175 if ((RegEdx & BIT6) != 0) {\r
176 Ia32PaeSupport = TRUE;\r
177 }\r
178 }\r
179\r
180 return Ia32PaeSupport;\r
181}\r
182\r
183/**\r
184 The function will check if page table should be setup or not.\r
185\r
186 @retval TRUE Page table should be created.\r
187 @retval FALSE Page table should not be created.\r
188\r
189**/\r
190BOOLEAN\r
191ToBuildPageTable (\r
192 VOID\r
193 )\r
194{\r
195 if (!IsIa32PaeSupport ()) {\r
196 return FALSE;\r
197 }\r
198\r
199 if (IsNullDetectionEnabled ()) {\r
200 return TRUE;\r
201 }\r
202\r
203 if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {\r
204 return TRUE;\r
205 }\r
206\r
207 if (PcdGetBool (PcdCpuStackGuard)) {\r
208 return TRUE;\r
209 }\r
210\r
211 if (IsEnableNonExecNeeded ()) {\r
212 return TRUE;\r
213 }\r
214\r
215 return FALSE;\r
216}\r
217\r
/**
  Transfers control to DxeCore.

  This function performs a CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList.
  It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.

  Does not return: control leaves via AsmEnablePaging64 (x64 hand-off),
  AsmEnablePaging32 (IA32 PAE hand-off), or SwitchStack (no paging).

  @param DxeCoreEntryPoint         The entry point of DxeCore.
  @param HobList                   The start of HobList passed to DxeCore.

**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS   DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS   HobList
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             BaseOfStack;
  EFI_PHYSICAL_ADDRESS             TopOfStack;
  UINTN                            PageTables;
  X64_IDT_GATE_DESCRIPTOR          *IdtTable;
  UINTN                            SizeOfTemplate;
  VOID                             *TemplateBase;
  EFI_PHYSICAL_ADDRESS             VectorAddress;
  UINT32                           Index;
  X64_IDT_TABLE                    *IdtTableForX64;
  EFI_VECTOR_HANDOFF_INFO          *VectorInfo;
  EFI_PEI_VECTOR_HANDOFF_INFO_PPI  *VectorHandoffInfoPpi;
  BOOLEAN                          BuildPageTablesIa32Pae;

  //
  // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.
  //
  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
    BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);
  }

  //
  // Allocate the stack DxeCore will execute on (STACK_SIZE bytes).
  //
  Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);
  ASSERT_EFI_ERROR (Status);

  if (FeaturePcdGet(PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load X64 dxe core.
    // Pre-allocate a 32 bytes which confroms to x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // x64 Calling Conventions requires that the stack must be aligned to 16 bytes
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA
    // memory, it may be corrupted when copying FV to high-end memory
    //
    AsmWriteGdtr (&gGdt);
    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE, 0, 0);

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    //
    // Paging might be already enabled. To avoid conflict configuration,
    // disable paging first anyway.
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    //
    // Get the size and location of the interrupt-vector stub template, then
    // allocate room for the X64 IDT plus one fixed-up copy of the template
    // per vector.
    //
    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               EFI_SIZE_TO_PAGES(sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),
               &VectorAddress
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding IDT to avoid that
    // it may not be gotten correctly after IDT register is re-written.
    //
    IdtTableForX64 = (X64_IDT_TABLE *) (UINTN) VectorAddress;
    IdtTableForX64->PeiService = GetPeiServicesTablePointer ();

    //
    // The vector stubs live directly after the X64_IDT_TABLE structure; each
    // IDT gate is pointed at its own copy of the template, which is then
    // patched with its vector number by AsmVectorFixup.
    //
    VectorAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) (IdtTableForX64 + 1);
    IdtTable = IdtTableForX64->IdtTable;
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      IdtTable[Index].Ia32IdtEntry.Bits.GateType = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector = SYS_CODE64_SEL;

      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow = (UINT16) VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16) (RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63 = (UINT32) (RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved = 0;

      CopyMem ((VOID *) (UINTN) VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *) (UINTN) VectorAddress, (UINT8) Index);

      VectorAddress += SizeOfTemplate;
    }

    gLidtDescriptor.Base = (UINTN) IdtTable;

    //
    // Disable interrupt of Debug timer, since new IDT table cannot handle it.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    //
    // Get Vector Hand-off Info PPI and build Guided HOB
    //
    Status = PeiServicesLocatePpi (
               &gEfiVectorHandoffInfoPpiGuid,
               0,
               NULL,
               (VOID **)&VectorHandoffInfoPpi
               );
    if (Status == EFI_SUCCESS) {
      DEBUG ((EFI_D_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
      VectorInfo = VectorHandoffInfoPpi->Info;
      //
      // Count entries up to and including the LAST_ENTRY terminator so the
      // HOB carries the whole list.
      //
      Index = 1;
      while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {
        VectorInfo ++;
        Index ++;
      }
      BuildGuidDataHob (
        &gEfiVectorHandoffInfoPpiGuid,
        VectorHandoffInfoPpi->Info,
        sizeof (EFI_VECTOR_HANDOFF_INFO) * Index
        );
    }

    //
    // Compute the top of the stack we were allocated. Pre-allocate a UINTN
    // for safety.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);

    PageTables = 0;
    BuildPageTablesIa32Pae = ToBuildPageTable ();
    if (BuildPageTablesIa32Pae) {
      PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);
      if (IsEnableNonExecNeeded ()) {
        EnableExecuteDisableBit();
      }
    }

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    if (BuildPageTablesIa32Pae) {
      //
      // Paging might be already enabled. To avoid conflict configuration,
      // disable paging first anyway.
      //
      AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
      AsmWriteCr3 (PageTables);
      //
      // Set Physical Address Extension (bit 5 of CR4).
      //
      AsmWriteCr4 (AsmReadCr4 () | BIT5);
    }

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Transfer the control to the entry point of DxeCore.
    // AsmEnablePaging32 turns paging on as part of the switch; SwitchStack
    // only changes the stack and jumps.
    //
    if (BuildPageTablesIa32Pae) {
      AsmEnablePaging32 (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    } else {
      SwitchStack (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    }
  }
}
465\r