]>
Commit | Line | Data |
---|---|---|
1 | /** @file\r | |
2 | Ia32-specific functionality for DxeLoad.\r | |
3 | \r | |
4 | Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r | |
5 | Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r | |
6 | \r | |
7 | SPDX-License-Identifier: BSD-2-Clause-Patent\r | |
8 | \r | |
9 | **/\r | |
10 | \r | |
11 | #include "DxeIpl.h"\r | |
12 | #include "VirtualMemory.h"\r | |
13 | \r | |
//
// Number of exception/interrupt vectors covered by the temporary x64 IDT
// built below (vectors 0-31, the architectural exceptions).
//
#define IDT_ENTRY_COUNT  32

//
// Layout of the buffer that holds the temporary x64 IDT. The PEI Services
// table pointer is stored in the slot immediately preceding IdtTable so that
// it can still be located after IDTR is re-written (see HandOffToDxeCore,
// which fills PeiService before loading the new IDT).
//
typedef struct _X64_IDT_TABLE {
  //
  // Reserved 4 bytes preceding PeiService and IdtTable,
  // since IDT base address should be 8-byte alignment.
  //
  UINT32                     Reserved;
  CONST EFI_PEI_SERVICES     **PeiService;
  X64_IDT_GATE_DESCRIPTOR    IdtTable[IDT_ENTRY_COUNT];
} X64_IDT_TABLE;
25 | \r | |
//
// Global Descriptor Table (GDT) used for the switch to long mode.
//
// Each initializer fills the IA32_GDT bit fields in declaration order:
// { LimitLow, BaseLow, BaseMid, Type, S, DPL, P, LimitHigh, AVL, L, DB, G, BaseHigh }
// NOTE(review): field order assumed from MdePkg BaseLib's IA32_GDT — confirm.
// The 0x38 entry sets L=1/DB=0, i.e. a 64-bit code segment (SYS_CODE64_SEL
// is loaded via AsmEnablePaging64 in HandOffToDxeCore).
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT  gGdtEntries[] = {
  /* selector { Global Segment Descriptor } */
  /* 0x00 */ {
    { 0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, 0 }
  },                                                // null descriptor
  /* 0x08 */ {
    { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                // linear data segment descriptor
  /* 0x10 */ {
    { 0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                // linear code segment descriptor
  /* 0x18 */ {
    { 0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                // system data segment descriptor
  /* 0x20 */ {
    { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                // system code segment descriptor
  /* 0x28 */ {
    { 0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, 0 }
  },                                                // spare segment descriptor
  /* 0x30 */ {
    { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                // system data segment descriptor
  /* 0x38 */ {
    { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 1, 0, 1, 0 }
  },                                                // system code segment descriptor (64-bit)
  /* 0x40 */ {
    { 0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, 0 }
  },                                                // spare segment descriptor
};
59 | \r | |
//
// IA32 Gdt register value: Limit is the GDT size minus one (as the
// architecture requires), Base is the address of gGdtEntries above.
// Loaded with AsmWriteGdtr before switching to long mode.
//
GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR  gGdt = {
  sizeof (gGdtEntries) - 1,
  (UINTN)gGdtEntries
};
67 | \r | |
//
// IDTR value for the temporary x64 IDT. Limit covers all IDT_ENTRY_COUNT
// gate descriptors (size minus one); Base is 0 here and is filled in at
// runtime by HandOffToDxeCore once the IDT buffer has been allocated.
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR  gLidtDescriptor = {
  sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,
  0
};
72 | \r | |
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 4G page table.

  Builds an identity-mapped IA32 PAE paging hierarchy covering the low
  4 GB: one PDPT page plus one Page Directory page per PDPT entry, with
  the address space mapped by 2 MB directory entries. The 2 MB page(s)
  overlapping [StackBase, StackBase + StackSize) are split into 4 KB
  pages (see Split2MPageTo4K), as is page 0 when NULL pointer detection
  is enabled, so guard attributes can be applied at 4 KB granularity.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of page table.

**/
UINTN
Create4GPageTablesIa32Pae (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize
  )
{
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PhysicalAddress;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           PageAddress;
  UINT64                          AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Map exactly 2^32 bytes (4 GB); this is fixed, not probed from CPUID.
  //
  PhysicalAddressBits = 32;

  //
  // Calculate the table entries needed: 2^(32-30) = 4 PDPT entries, each
  // covering 1 GB via a dedicated Page Directory page.
  //
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));

  //
  // One page for the PDPT itself plus one Page Directory page per entry.
  // Pages are carved sequentially out of this single allocation below.
  //
  TotalPagesNum = NumberOfPdpEntriesNeeded + 1;
  PageAddress   = (UINTN)AllocatePageTableMemory (TotalPagesNum);
  ASSERT (PageAddress != 0);

  PageMap      = (VOID *)PageAddress;
  PageAddress += SIZE_4KB;

  PageDirectoryPointerEntry = PageMap;
  PhysicalAddress           = 0;

  for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    //
    // Each Directory Pointer entries points to a page of Page Directory entires.
    // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
    //
    PageDirectoryEntry = (VOID *)PageAddress;
    PageAddress       += SIZE_4KB;

    //
    // Fill in a Page Directory Pointer Entries
    //
    PageDirectoryPointerEntry->Uint64       = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
    PageDirectoryPointerEntry->Bits.Present = 1;

    //
    // 512 entries x 2 MB = 1 GB per Page Directory page; PhysicalAddress
    // advances in lockstep so the mapping is an identity mapping.
    //
    for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {
      if (  (IsNullDetectionEnabled () && (PhysicalAddress == 0))
         || (  (PhysicalAddress < StackBase + StackSize)
            && ((PhysicalAddress + SIZE_2MB) > StackBase)))
      {
        //
        // Need to split this 2M page that covers stack range.
        //
        Split2MPageTo4K (PhysicalAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, 0, 0);
      } else {
        //
        // Fill in the Page Directory entries
        //
        PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress | AddressEncMask;
        PageDirectoryEntry->Bits.ReadWrite = 1;
        PageDirectoryEntry->Bits.Present   = 1;
        PageDirectoryEntry->Bits.MustBe1   = 1;
      }
    }
  }

  //
  // Zero the remaining PDPT entries (not-present) up to the full 512-entry
  // page so stale memory cannot be interpreted as valid mappings.
  //
  for ( ; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    ZeroMem (
      PageDirectoryPointerEntry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, FALSE);

  return (UINTN)PageMap;
}
173 | \r | |
174 | /**\r | |
175 | The function will check if IA32 PAE is supported.\r | |
176 | \r | |
177 | @retval TRUE IA32 PAE is supported.\r | |
178 | @retval FALSE IA32 PAE is not supported.\r | |
179 | \r | |
180 | **/\r | |
181 | BOOLEAN\r | |
182 | IsIa32PaeSupport (\r | |
183 | VOID\r | |
184 | )\r | |
185 | {\r | |
186 | UINT32 RegEax;\r | |
187 | UINT32 RegEdx;\r | |
188 | BOOLEAN Ia32PaeSupport;\r | |
189 | \r | |
190 | Ia32PaeSupport = FALSE;\r | |
191 | AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);\r | |
192 | if (RegEax >= 0x1) {\r | |
193 | AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);\r | |
194 | if ((RegEdx & BIT6) != 0) {\r | |
195 | Ia32PaeSupport = TRUE;\r | |
196 | }\r | |
197 | }\r | |
198 | \r | |
199 | return Ia32PaeSupport;\r | |
200 | }\r | |
201 | \r | |
202 | /**\r | |
203 | The function will check if page table should be setup or not.\r | |
204 | \r | |
205 | @retval TRUE Page table should be created.\r | |
206 | @retval FALSE Page table should not be created.\r | |
207 | \r | |
208 | **/\r | |
209 | BOOLEAN\r | |
210 | ToBuildPageTable (\r | |
211 | VOID\r | |
212 | )\r | |
213 | {\r | |
214 | if (!IsIa32PaeSupport ()) {\r | |
215 | return FALSE;\r | |
216 | }\r | |
217 | \r | |
218 | if (IsNullDetectionEnabled ()) {\r | |
219 | return TRUE;\r | |
220 | }\r | |
221 | \r | |
222 | if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {\r | |
223 | return TRUE;\r | |
224 | }\r | |
225 | \r | |
226 | if (PcdGetBool (PcdCpuStackGuard)) {\r | |
227 | return TRUE;\r | |
228 | }\r | |
229 | \r | |
230 | if (IsEnableNonExecNeeded ()) {\r | |
231 | return TRUE;\r | |
232 | }\r | |
233 | \r | |
234 | return FALSE;\r | |
235 | }\r | |
236 | \r | |
/**
  Transfers control to DxeCore.

  This function performs a CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList.
  It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.

  Two paths exist, selected by PcdDxeIplSwitchToLongMode:
  - Long-mode path: load a new GDT, build x64 identity-mapped page tables,
    build a temporary x64 IDT, then AsmEnablePaging64 into the x64 DxeCore.
  - IA32 path: optionally build PAE page tables (NULL detection / heap
    guard / stack guard / NX), then AsmEnablePaging32 or SwitchStack into
    the 32-bit DxeCore.

  This function does not return.

  @param DxeCoreEntryPoint         The entry point of DxeCore.
  @param HobList                   The start of HobList passed to DxeCore.

**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS  HobList
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             BaseOfStack;
  EFI_PHYSICAL_ADDRESS             TopOfStack;
  UINTN                            PageTables;
  X64_IDT_GATE_DESCRIPTOR          *IdtTable;
  UINTN                            SizeOfTemplate;
  VOID                             *TemplateBase;
  EFI_PHYSICAL_ADDRESS             VectorAddress;
  UINT32                           Index;
  X64_IDT_TABLE                    *IdtTableForX64;
  EFI_VECTOR_HANDOFF_INFO          *VectorInfo;
  EFI_PEI_VECTOR_HANDOFF_INFO_PPI  *VectorHandoffInfoPpi;
  BOOLEAN                          BuildPageTablesIa32Pae;

  //
  // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.
  //
  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
    BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);
  }

  //
  // Allocate the stack DxeCore will run on; both paths below derive
  // TopOfStack from this allocation.
  //
  Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);
  ASSERT_EFI_ERROR (Status);

  if (FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load X64 dxe core.
    // Pre-allocate a 32 bytes which confroms to x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // x64 Calling Conventions requires that the stack must be aligned to 16 bytes
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA
    // memory, it may be corrupted when copying FV to high-end memory
    //
    AsmWriteGdtr (&gGdt);
    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE, 0, 0);

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    //
    // Paging might be already enabled. To avoid conflict configuration,
    // disable paging first anyway.
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    //
    // Build the temporary x64 IDT: one copy of the assembly vector
    // template per vector, each fixed up with its vector number.
    //
    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               EFI_SIZE_TO_PAGES (sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),
               &VectorAddress
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding IDT to avoid that
    // it may not be gotten correctly after IDT register is re-written.
    //
    IdtTableForX64             = (X64_IDT_TABLE *)(UINTN)VectorAddress;
    IdtTableForX64->PeiService = GetPeiServicesTablePointer ();

    //
    // Vector handler code is placed directly after the X64_IDT_TABLE
    // structure; each gate's offset points at its own template copy.
    //
    VectorAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)(IdtTableForX64 + 1);
    IdtTable      = IdtTableForX64->IdtTable;
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      // 0x8e = present, DPL 0, 64-bit interrupt gate
      IdtTable[Index].Ia32IdtEntry.Bits.GateType   = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector   = SYS_CODE64_SEL;

      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow  = (UINT16)VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16)(RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63                 = (UINT32)(RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved                     = 0;

      CopyMem ((VOID *)(UINTN)VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *)(UINTN)VectorAddress, (UINT8)Index);

      VectorAddress += SizeOfTemplate;
    }

    gLidtDescriptor.Base = (UINTN)IdtTable;

    //
    // Disable interrupt of Debug timer, since new IDT table cannot handle it.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    //
    // Get Vector Hand-off Info PPI and build Guided HOB
    //
    Status = PeiServicesLocatePpi (
               &gEfiVectorHandoffInfoPpiGuid,
               0,
               NULL,
               (VOID **)&VectorHandoffInfoPpi
               );
    if (Status == EFI_SUCCESS) {
      DEBUG ((DEBUG_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
      VectorInfo = VectorHandoffInfoPpi->Info;
      //
      // Count entries including the EFI_VECTOR_HANDOFF_LAST_ENTRY
      // terminator (Index starts at 1).
      //
      Index = 1;
      while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {
        VectorInfo++;
        Index++;
      }

      BuildGuidDataHob (
        &gEfiVectorHandoffInfoPpiGuid,
        VectorHandoffInfoPpi->Info,
        sizeof (EFI_VECTOR_HANDOFF_INFO) * Index
        );
    }

    //
    // Compute the top of the stack we were allocated. Pre-allocate a UINTN
    // for safety.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;
    TopOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);

    PageTables             = 0;
    BuildPageTablesIa32Pae = ToBuildPageTable ();
    if (BuildPageTablesIa32Pae) {
      PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);
      if (IsEnableNonExecNeeded ()) {
        EnableExecuteDisableBit ();
      }
    }

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    if (BuildPageTablesIa32Pae) {
      //
      // Paging might be already enabled. To avoid conflict configuration,
      // disable paging first anyway.
      //
      AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
      AsmWriteCr3 (PageTables);
      //
      // Set Physical Address Extension (bit 5 of CR4).
      //
      AsmWriteCr4 (AsmReadCr4 () | BIT5);
    }

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Transfer the control to the entry point of DxeCore.
    //
    if (BuildPageTablesIa32Pae) {
      AsmEnablePaging32 (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *)(UINTN)TopOfStack
        );
    } else {
      SwitchStack (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *)(UINTN)TopOfStack
        );
    }
  }
}