]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/DxeIplPeim/Ia32/DxeLoadFunc.c
MdeModulePkg: Change use of EFI_D_* to DEBUG_*
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / Ia32 / DxeLoadFunc.c
1 /** @file
2 Ia32-specific functionality for DxeLoad.
3
4 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "DxeIpl.h"
12 #include "VirtualMemory.h"
13
//
// Number of exception/interrupt vectors covered by the IDT built for the
// X64 hand-off path.
//
#define IDT_ENTRY_COUNT 32

//
// Layout of the runtime-allocated IDT page used when switching to long
// mode: the PEI Services table pointer is stored immediately before the
// IDT itself so it can still be retrieved after the IDT register has been
// re-written (see HandOffToDxeCore).
//
typedef struct _X64_IDT_TABLE {
//
// Reserved 4 bytes preceding PeiService and IdtTable,
// since IDT base address should be 8-byte alignment.
//
UINT32 Reserved;
CONST EFI_PEI_SERVICES **PeiService;
X64_IDT_GATE_DESCRIPTOR IdtTable[IDT_ENTRY_COUNT];
} X64_IDT_TABLE;
25
//
// Global Descriptor Table (GDT), loaded via AsmWriteGdtr () before the
// switch to long mode in HandOffToDxeCore.  Initializer order follows the
// IA32_GDT bit-field layout.
// NOTE(review): the 0x38 entry appears to set the L (64-bit code) bit and
// clear D/B — it is the selector passed as SYS_CODE64_SEL to
// AsmEnablePaging64; confirm field positions against the IA32_GDT
// definition in BaseLib.h.
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries[] = {
/* selector { Global Segment Descriptor } */
/* 0x00 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //null descriptor
/* 0x08 */ {{0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear data segment descriptor
/* 0x10 */ {{0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear code segment descriptor
/* 0x18 */ {{0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system data segment descriptor
/* 0x20 */ {{0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system code segment descriptor
/* 0x28 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //spare segment descriptor
/* 0x30 */ {{0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system data segment descriptor
/* 0x38 */ {{0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 1, 0, 1, 0}}, //system code segment descriptor
/* 0x40 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //spare segment descriptor
};
41
//
// IA32 Gdt register
// Limit is the table size in bytes minus one, per the x86 descriptor-table
// register convention; Base is the linear address of gGdtEntries.
//
GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR gGdt = {
sizeof (gGdtEntries) - 1,
(UINTN) gGdtEntries
};
49
//
// IDT register value for the long-mode IDT.  Limit covers IDT_ENTRY_COUNT
// X64 gate descriptors; Base is 0 here and is filled in at runtime by
// HandOffToDxeCore once the IDT has been allocated.
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR gLidtDescriptor = {
sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,
0
};
54
55 /**
56 Allocates and fills in the Page Directory and Page Table Entries to
57 establish a 4G page table.
58
59 @param[in] StackBase Stack base address.
60 @param[in] StackSize Stack size.
61
62 @return The address of page table.
63
64 **/
65 UINTN
66 Create4GPageTablesIa32Pae (
67 IN EFI_PHYSICAL_ADDRESS StackBase,
68 IN UINTN StackSize
69 )
70 {
71 UINT8 PhysicalAddressBits;
72 EFI_PHYSICAL_ADDRESS PhysicalAddress;
73 UINTN IndexOfPdpEntries;
74 UINTN IndexOfPageDirectoryEntries;
75 UINT32 NumberOfPdpEntriesNeeded;
76 PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
77 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
78 PAGE_TABLE_ENTRY *PageDirectoryEntry;
79 UINTN TotalPagesNum;
80 UINTN PageAddress;
81 UINT64 AddressEncMask;
82
83 //
84 // Make sure AddressEncMask is contained to smallest supported address field
85 //
86 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
87
88 PhysicalAddressBits = 32;
89
90 //
91 // Calculate the table entries needed.
92 //
93 NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));
94
95 TotalPagesNum = NumberOfPdpEntriesNeeded + 1;
96 PageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
97 ASSERT (PageAddress != 0);
98
99 PageMap = (VOID *) PageAddress;
100 PageAddress += SIZE_4KB;
101
102 PageDirectoryPointerEntry = PageMap;
103 PhysicalAddress = 0;
104
105 for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
106 //
107 // Each Directory Pointer entries points to a page of Page Directory entires.
108 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
109 //
110 PageDirectoryEntry = (VOID *) PageAddress;
111 PageAddress += SIZE_4KB;
112
113 //
114 // Fill in a Page Directory Pointer Entries
115 //
116 PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;
117 PageDirectoryPointerEntry->Bits.Present = 1;
118
119 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {
120 if ((IsNullDetectionEnabled () && PhysicalAddress == 0)
121 || ((PhysicalAddress < StackBase + StackSize)
122 && ((PhysicalAddress + SIZE_2MB) > StackBase))) {
123 //
124 // Need to split this 2M page that covers stack range.
125 //
126 Split2MPageTo4K (PhysicalAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, 0, 0);
127 } else {
128 //
129 // Fill in the Page Directory entries
130 //
131 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress | AddressEncMask;
132 PageDirectoryEntry->Bits.ReadWrite = 1;
133 PageDirectoryEntry->Bits.Present = 1;
134 PageDirectoryEntry->Bits.MustBe1 = 1;
135 }
136 }
137 }
138
139 for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
140 ZeroMem (
141 PageDirectoryPointerEntry,
142 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
143 );
144 }
145
146 //
147 // Protect the page table by marking the memory used for page table to be
148 // read-only.
149 //
150 EnablePageTableProtection ((UINTN)PageMap, FALSE);
151
152 return (UINTN) PageMap;
153 }
154
155 /**
156 The function will check if IA32 PAE is supported.
157
158 @retval TRUE IA32 PAE is supported.
159 @retval FALSE IA32 PAE is not supported.
160
161 **/
162 BOOLEAN
163 IsIa32PaeSupport (
164 VOID
165 )
166 {
167 UINT32 RegEax;
168 UINT32 RegEdx;
169 BOOLEAN Ia32PaeSupport;
170
171 Ia32PaeSupport = FALSE;
172 AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);
173 if (RegEax >= 0x1) {
174 AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);
175 if ((RegEdx & BIT6) != 0) {
176 Ia32PaeSupport = TRUE;
177 }
178 }
179
180 return Ia32PaeSupport;
181 }
182
183 /**
184 The function will check if page table should be setup or not.
185
186 @retval TRUE Page table should be created.
187 @retval FALSE Page table should not be created.
188
189 **/
190 BOOLEAN
191 ToBuildPageTable (
192 VOID
193 )
194 {
195 if (!IsIa32PaeSupport ()) {
196 return FALSE;
197 }
198
199 if (IsNullDetectionEnabled ()) {
200 return TRUE;
201 }
202
203 if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {
204 return TRUE;
205 }
206
207 if (PcdGetBool (PcdCpuStackGuard)) {
208 return TRUE;
209 }
210
211 if (IsEnableNonExecNeeded ()) {
212 return TRUE;
213 }
214
215 return FALSE;
216 }
217
/**
  Transfers control to DxeCore.

  This function performs a CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList.
  It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.

  On success control does not return here: it passes to DxeCore via
  AsmEnablePaging64 (X64 DxeCore), AsmEnablePaging32 (IA32 DxeCore with
  PAE paging) or SwitchStack (IA32 DxeCore without paging).

  @param DxeCoreEntryPoint         The entry point of DxeCore.
  @param HobList                   The start of HobList passed to DxeCore.

**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS  HobList
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             BaseOfStack;
  EFI_PHYSICAL_ADDRESS             TopOfStack;
  UINTN                            PageTables;
  X64_IDT_GATE_DESCRIPTOR          *IdtTable;
  UINTN                            SizeOfTemplate;
  VOID                             *TemplateBase;
  EFI_PHYSICAL_ADDRESS             VectorAddress;
  UINT32                           Index;
  X64_IDT_TABLE                    *IdtTableForX64;
  EFI_VECTOR_HANDOFF_INFO          *VectorInfo;
  EFI_PEI_VECTOR_HANDOFF_INFO_PPI  *VectorHandoffInfoPpi;
  BOOLEAN                          BuildPageTablesIa32Pae;

  //
  // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.
  //
  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
    BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);
  }

  //
  // Allocate the stack DxeCore will run on.
  //
  Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);
  ASSERT_EFI_ERROR (Status);

  //
  // PcdDxeIplSwitchToLongMode selects an X64 DxeCore: build identity-map
  // page tables, load a 64-bit GDT and IDT, then enter long mode.
  //
  if (FeaturePcdGet(PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load X64 dxe core.
    // Pre-allocate a 32 bytes which confroms to x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // x64 Calling Conventions requires that the stack must be aligned to 16 bytes
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA
    // memory, it may be corrupted when copying FV to high-end memory
    //
    AsmWriteGdtr (&gGdt);
    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE, 0, 0);

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    //
    // Paging might be already enabled. To avoid conflict configuration,
    // disable paging first anyway.
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    //
    // Fetch the interrupt-vector stub template that each IDT entry's
    // handler code is copied from.
    //
    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

    //
    // Allocate room for the IDT table plus one copy of the vector stub
    // template per IDT entry.
    //
    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               EFI_SIZE_TO_PAGES(sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),
               &VectorAddress
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding IDT to avoid that
    // it may not be gotten correctly after IDT register is re-written.
    //
    IdtTableForX64 = (X64_IDT_TABLE *) (UINTN) VectorAddress;
    IdtTableForX64->PeiService = GetPeiServicesTablePointer ();

    //
    // The per-vector stubs live immediately after the X64_IDT_TABLE
    // structure.
    //
    VectorAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) (IdtTableForX64 + 1);
    IdtTable = IdtTableForX64->IdtTable;
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      //
      // 0x8e: present, DPL 0, interrupt gate.
      //
      IdtTable[Index].Ia32IdtEntry.Bits.GateType = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector = SYS_CODE64_SEL;

      //
      // Point the gate at this vector's stub; the 64-bit offset is split
      // across the three offset fields.
      //
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow = (UINT16) VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16) (RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63 = (UINT32) (RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved = 0;

      //
      // Copy the stub template into place and patch it with this vector's
      // number.
      //
      CopyMem ((VOID *) (UINTN) VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *) (UINTN) VectorAddress, (UINT8) Index);

      VectorAddress += SizeOfTemplate;
    }

    gLidtDescriptor.Base = (UINTN) IdtTable;

    //
    // Disable interrupt of Debug timer, since new IDT table cannot handle it.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    //
    // IA32 DxeCore hand-off path.
    //
    // Get Vector Hand-off Info PPI and build Guided HOB
    //
    Status = PeiServicesLocatePpi (
               &gEfiVectorHandoffInfoPpiGuid,
               0,
               NULL,
               (VOID **)&VectorHandoffInfoPpi
               );
    if (Status == EFI_SUCCESS) {
      DEBUG ((DEBUG_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
      VectorInfo = VectorHandoffInfoPpi->Info;
      Index = 1;
      //
      // Count entries up to and including the terminating
      // EFI_VECTOR_HANDOFF_LAST_ENTRY record.
      //
      while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {
        VectorInfo ++;
        Index ++;
      }
      BuildGuidDataHob (
        &gEfiVectorHandoffInfoPpiGuid,
        VectorHandoffInfoPpi->Info,
        sizeof (EFI_VECTOR_HANDOFF_INFO) * Index
        );
    }

    //
    // Compute the top of the stack we were allocated. Pre-allocate a UINTN
    // for safety.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);

    PageTables = 0;
    BuildPageTablesIa32Pae = ToBuildPageTable ();
    if (BuildPageTablesIa32Pae) {
      PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);
      if (IsEnableNonExecNeeded ()) {
        EnableExecuteDisableBit();
      }
    }

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    if (BuildPageTablesIa32Pae) {
      //
      // Paging might be already enabled. To avoid conflict configuration,
      // disable paging first anyway.
      //
      AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
      AsmWriteCr3 (PageTables);
      //
      // Set Physical Address Extension (bit 5 of CR4).
      //
      AsmWriteCr4 (AsmReadCr4 () | BIT5);
    }

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Transfer the control to the entry point of DxeCore.
    //
    if (BuildPageTablesIa32Pae) {
      AsmEnablePaging32 (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    } else {
      SwitchStack (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    }
  }
}