/** @file
  Ia32-specific functionality for DxeLoad.

  Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "VirtualMemory.h"

#define IDT_ENTRY_COUNT  32
16 typedef struct _X64_IDT_TABLE
{
18 // Reserved 4 bytes preceding PeiService and IdtTable,
19 // since IDT base address should be 8-byte alignment.
22 CONST EFI_PEI_SERVICES
**PeiService
;
23 X64_IDT_GATE_DESCRIPTOR IdtTable
[IDT_ENTRY_COUNT
];
27 // Global Descriptor Table (GDT)
29 GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries
[] = {
30 /* selector { Global Segment Descriptor } */
32 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
35 { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
36 }, // linear data segment descriptor
38 { 0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
39 }, // linear code segment descriptor
41 { 0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
42 }, // system data segment descriptor
44 { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
45 }, // system code segment descriptor
47 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
48 }, // spare segment descriptor
50 { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
51 }, // system data segment descriptor
53 { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 1, 0, 1, 0 }
54 }, // system code segment descriptor
56 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
57 }, // spare segment descriptor
63 GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR gGdt
= {
64 sizeof (gGdtEntries
) - 1,
68 GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR gLidtDescriptor
= {
69 sizeof (X64_IDT_GATE_DESCRIPTOR
) * IDT_ENTRY_COUNT
- 1,
74 Allocates and fills in the Page Directory and Page Table Entries to
75 establish a 4G page table.
77 @param[in] StackBase Stack base address.
78 @param[in] StackSize Stack size.
80 @return The address of page table.
84 Create4GPageTablesIa32Pae (
85 IN EFI_PHYSICAL_ADDRESS StackBase
,
89 UINT8 PhysicalAddressBits
;
90 EFI_PHYSICAL_ADDRESS PhysicalAddress
;
91 UINTN IndexOfPdpEntries
;
92 UINTN IndexOfPageDirectoryEntries
;
93 UINT32 NumberOfPdpEntriesNeeded
;
94 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMap
;
95 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
96 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
99 UINT64 AddressEncMask
;
102 // Make sure AddressEncMask is contained to smallest supported address field
104 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
106 PhysicalAddressBits
= 32;
109 // Calculate the table entries needed.
111 NumberOfPdpEntriesNeeded
= (UINT32
)LShiftU64 (1, (PhysicalAddressBits
- 30));
113 TotalPagesNum
= NumberOfPdpEntriesNeeded
+ 1;
114 PageAddress
= (UINTN
)AllocatePageTableMemory (TotalPagesNum
);
115 ASSERT (PageAddress
!= 0);
117 PageMap
= (VOID
*)PageAddress
;
118 PageAddress
+= SIZE_4KB
;
120 PageDirectoryPointerEntry
= PageMap
;
123 for (IndexOfPdpEntries
= 0; IndexOfPdpEntries
< NumberOfPdpEntriesNeeded
; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
125 // Each Directory Pointer entries points to a page of Page Directory entires.
126 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
128 PageDirectoryEntry
= (VOID
*)PageAddress
;
129 PageAddress
+= SIZE_4KB
;
132 // Fill in a Page Directory Pointer Entries
134 PageDirectoryPointerEntry
->Uint64
= (UINT64
)(UINTN
)PageDirectoryEntry
| AddressEncMask
;
135 PageDirectoryPointerEntry
->Bits
.Present
= 1;
137 for (IndexOfPageDirectoryEntries
= 0; IndexOfPageDirectoryEntries
< 512; IndexOfPageDirectoryEntries
++, PageDirectoryEntry
++, PhysicalAddress
+= SIZE_2MB
) {
138 if ( (IsNullDetectionEnabled () && (PhysicalAddress
== 0))
139 || ( (PhysicalAddress
< StackBase
+ StackSize
)
140 && ((PhysicalAddress
+ SIZE_2MB
) > StackBase
)))
143 // Need to split this 2M page that covers stack range.
145 Split2MPageTo4K (PhysicalAddress
, (UINT64
*)PageDirectoryEntry
, StackBase
, StackSize
, 0, 0);
148 // Fill in the Page Directory entries
150 PageDirectoryEntry
->Uint64
= (UINT64
)PhysicalAddress
| AddressEncMask
;
151 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
152 PageDirectoryEntry
->Bits
.Present
= 1;
153 PageDirectoryEntry
->Bits
.MustBe1
= 1;
158 for ( ; IndexOfPdpEntries
< 512; IndexOfPdpEntries
++, PageDirectoryPointerEntry
++) {
160 PageDirectoryPointerEntry
,
161 sizeof (PAGE_MAP_AND_DIRECTORY_POINTER
)
166 // Protect the page table by marking the memory used for page table to be
169 EnablePageTableProtection ((UINTN
)PageMap
, FALSE
);
171 return (UINTN
)PageMap
;
175 The function will check if IA32 PAE is supported.
177 @retval TRUE IA32 PAE is supported.
178 @retval FALSE IA32 PAE is not supported.
188 BOOLEAN Ia32PaeSupport
;
190 Ia32PaeSupport
= FALSE
;
191 AsmCpuid (0x0, &RegEax
, NULL
, NULL
, NULL
);
193 AsmCpuid (0x1, NULL
, NULL
, NULL
, &RegEdx
);
194 if ((RegEdx
& BIT6
) != 0) {
195 Ia32PaeSupport
= TRUE
;
199 return Ia32PaeSupport
;
203 The function will check if page table should be setup or not.
205 @retval TRUE Page table should be created.
206 @retval FALSE Page table should not be created.
214 if (!IsIa32PaeSupport ()) {
218 if (IsNullDetectionEnabled ()) {
222 if (PcdGet8 (PcdHeapGuardPropertyMask
) != 0) {
226 if (PcdGetBool (PcdCpuStackGuard
)) {
230 if (IsEnableNonExecNeeded ()) {
238 Transfers control to DxeCore.
240 This function performs a CPU architecture specific operations to execute
241 the entry point of DxeCore with the parameters of HobList.
242 It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.
244 @param DxeCoreEntryPoint The entry point of DxeCore.
245 @param HobList The start of HobList passed to DxeCore.
250 IN EFI_PHYSICAL_ADDRESS DxeCoreEntryPoint
,
251 IN EFI_PEI_HOB_POINTERS HobList
255 EFI_PHYSICAL_ADDRESS BaseOfStack
;
256 EFI_PHYSICAL_ADDRESS TopOfStack
;
258 X64_IDT_GATE_DESCRIPTOR
*IdtTable
;
259 UINTN SizeOfTemplate
;
261 EFI_PHYSICAL_ADDRESS VectorAddress
;
263 X64_IDT_TABLE
*IdtTableForX64
;
264 EFI_VECTOR_HANDOFF_INFO
*VectorInfo
;
265 EFI_PEI_VECTOR_HANDOFF_INFO_PPI
*VectorHandoffInfoPpi
;
266 BOOLEAN BuildPageTablesIa32Pae
;
269 // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.
271 if (IsNullDetectionEnabled ()) {
272 ClearFirst4KPage (HobList
.Raw
);
273 BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData
);
276 Status
= PeiServicesAllocatePages (EfiBootServicesData
, EFI_SIZE_TO_PAGES (STACK_SIZE
), &BaseOfStack
);
277 ASSERT_EFI_ERROR (Status
);
279 if (FeaturePcdGet (PcdDxeIplSwitchToLongMode
)) {
281 // Compute the top of the stack we were allocated, which is used to load X64 dxe core.
282 // Pre-allocate a 32 bytes which confroms to x64 calling convention.
284 // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
285 // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the
286 // register parameters is reserved on the stack, in case the called function
287 // wants to spill them; this is important if the function is variadic.
289 TopOfStack
= BaseOfStack
+ EFI_SIZE_TO_PAGES (STACK_SIZE
) * EFI_PAGE_SIZE
- 32;
292 // x64 Calling Conventions requires that the stack must be aligned to 16 bytes
294 TopOfStack
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)ALIGN_POINTER (TopOfStack
, 16);
297 // Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA
298 // memory, it may be corrupted when copying FV to high-end memory
300 AsmWriteGdtr (&gGdt
);
302 // Create page table and save PageMapLevel4 to CR3
304 PageTables
= CreateIdentityMappingPageTables (BaseOfStack
, STACK_SIZE
, 0, 0);
307 // End of PEI phase signal
309 PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi
.Guid
);
310 Status
= PeiServicesInstallPpi (&gEndOfPeiSignalPpi
);
311 PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi
.Guid
);
312 ASSERT_EFI_ERROR (Status
);
315 // Paging might be already enabled. To avoid conflict configuration,
316 // disable paging first anyway.
318 AsmWriteCr0 (AsmReadCr0 () & (~BIT31
));
319 AsmWriteCr3 (PageTables
);
322 // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
324 UpdateStackHob (BaseOfStack
, STACK_SIZE
);
326 SizeOfTemplate
= AsmGetVectorTemplatInfo (&TemplateBase
);
328 Status
= PeiServicesAllocatePages (
330 EFI_SIZE_TO_PAGES (sizeof (X64_IDT_TABLE
) + SizeOfTemplate
* IDT_ENTRY_COUNT
),
333 ASSERT_EFI_ERROR (Status
);
336 // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding IDT to avoid that
337 // it may not be gotten correctly after IDT register is re-written.
339 IdtTableForX64
= (X64_IDT_TABLE
*)(UINTN
)VectorAddress
;
340 IdtTableForX64
->PeiService
= GetPeiServicesTablePointer ();
342 VectorAddress
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)(IdtTableForX64
+ 1);
343 IdtTable
= IdtTableForX64
->IdtTable
;
344 for (Index
= 0; Index
< IDT_ENTRY_COUNT
; Index
++) {
345 IdtTable
[Index
].Ia32IdtEntry
.Bits
.GateType
= 0x8e;
346 IdtTable
[Index
].Ia32IdtEntry
.Bits
.Reserved_0
= 0;
347 IdtTable
[Index
].Ia32IdtEntry
.Bits
.Selector
= SYS_CODE64_SEL
;
349 IdtTable
[Index
].Ia32IdtEntry
.Bits
.OffsetLow
= (UINT16
)VectorAddress
;
350 IdtTable
[Index
].Ia32IdtEntry
.Bits
.OffsetHigh
= (UINT16
)(RShiftU64 (VectorAddress
, 16));
351 IdtTable
[Index
].Offset32To63
= (UINT32
)(RShiftU64 (VectorAddress
, 32));
352 IdtTable
[Index
].Reserved
= 0;
354 CopyMem ((VOID
*)(UINTN
)VectorAddress
, TemplateBase
, SizeOfTemplate
);
355 AsmVectorFixup ((VOID
*)(UINTN
)VectorAddress
, (UINT8
)Index
);
357 VectorAddress
+= SizeOfTemplate
;
360 gLidtDescriptor
.Base
= (UINTN
)IdtTable
;
363 // Disable interrupt of Debug timer, since new IDT table cannot handle it.
365 SaveAndSetDebugTimerInterrupt (FALSE
);
367 AsmWriteIdtr (&gLidtDescriptor
);
371 "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
378 // Go to Long Mode and transfer control to DxeCore.
379 // Interrupts will not get turned on until the CPU AP is loaded.
380 // Call x64 drivers passing in single argument, a pointer to the HOBs.
385 (EFI_PHYSICAL_ADDRESS
)(UINTN
)(HobList
.Raw
),
391 // Get Vector Hand-off Info PPI and build Guided HOB
393 Status
= PeiServicesLocatePpi (
394 &gEfiVectorHandoffInfoPpiGuid
,
397 (VOID
**)&VectorHandoffInfoPpi
399 if (Status
== EFI_SUCCESS
) {
400 DEBUG ((DEBUG_INFO
, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
401 VectorInfo
= VectorHandoffInfoPpi
->Info
;
403 while (VectorInfo
->Attribute
!= EFI_VECTOR_HANDOFF_LAST_ENTRY
) {
409 &gEfiVectorHandoffInfoPpiGuid
,
410 VectorHandoffInfoPpi
->Info
,
411 sizeof (EFI_VECTOR_HANDOFF_INFO
) * Index
416 // Compute the top of the stack we were allocated. Pre-allocate a UINTN
419 TopOfStack
= BaseOfStack
+ EFI_SIZE_TO_PAGES (STACK_SIZE
) * EFI_PAGE_SIZE
- CPU_STACK_ALIGNMENT
;
420 TopOfStack
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)ALIGN_POINTER (TopOfStack
, CPU_STACK_ALIGNMENT
);
423 BuildPageTablesIa32Pae
= ToBuildPageTable ();
424 if (BuildPageTablesIa32Pae
) {
425 PageTables
= Create4GPageTablesIa32Pae (BaseOfStack
, STACK_SIZE
);
426 if (IsEnableNonExecNeeded ()) {
427 EnableExecuteDisableBit ();
432 // End of PEI phase signal
434 PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi
.Guid
);
435 Status
= PeiServicesInstallPpi (&gEndOfPeiSignalPpi
);
436 PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi
.Guid
);
437 ASSERT_EFI_ERROR (Status
);
439 if (BuildPageTablesIa32Pae
) {
441 // Paging might be already enabled. To avoid conflict configuration,
442 // disable paging first anyway.
444 AsmWriteCr0 (AsmReadCr0 () & (~BIT31
));
445 AsmWriteCr3 (PageTables
);
447 // Set Physical Address Extension (bit 5 of CR4).
449 AsmWriteCr4 (AsmReadCr4 () | BIT5
);
453 // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
455 UpdateStackHob (BaseOfStack
, STACK_SIZE
);
459 "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
466 // Transfer the control to the entry point of DxeCore.
468 if (BuildPageTablesIa32Pae
) {
470 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)DxeCoreEntryPoint
,
473 (VOID
*)(UINTN
)TopOfStack
477 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)DxeCoreEntryPoint
,
480 (VOID
*)(UINTN
)TopOfStack