/** @file\r
- Ia32-specifc functionality for DxeLoad.\r
+ Ia32-specific functionality for DxeLoad.\r
\r
-Copyright (c) 2006 - 2008, Intel Corporation. <BR>\r
-All rights reserved. This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
#include "DxeIpl.h"\r
#include "VirtualMemory.h"\r
\r
+#define IDT_ENTRY_COUNT 32\r
+\r
+typedef struct _X64_IDT_TABLE {\r
+ //\r
+ // Reserved 4 bytes preceding PeiService and IdtTable,\r
+ // since the IDT base address should be 8-byte aligned.\r
+ //\r
+ UINT32 Reserved;\r
+ CONST EFI_PEI_SERVICES **PeiService;\r
+ X64_IDT_GATE_DESCRIPTOR IdtTable[IDT_ENTRY_COUNT];\r
+} X64_IDT_TABLE;\r
+\r
//\r
// Global Descriptor Table (GDT)\r
//\r
-GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries [] = {\r
-/* selector { Global Segment Descriptor } */ \r
-/* 0x00 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //null descriptor \r
+GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries[] = {\r
+/* selector { Global Segment Descriptor } */\r
+/* 0x00 */ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, //null descriptor\r
/* 0x08 */ {{0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear data segment descriptor\r
/* 0x10 */ {{0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //linear code segment descriptor\r
/* 0x18 */ {{0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0}}, //system data segment descriptor\r
};\r
\r
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR gLidtDescriptor = {\r
- sizeof (X64_IDT_GATE_DESCRIPTOR) * 32 - 1,\r
+ sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,\r
0\r
};\r
\r
+/**\r
+ Allocates and fills in the Page Directory and Page Table Entries to\r
+ establish a 4G page table.\r
+\r
+ @param[in] StackBase Stack base address.\r
+ @param[in] StackSize Stack size.\r
+\r
+ @return The address of page table.\r
+\r
+**/\r
+UINTN\r
+Create4GPageTablesIa32Pae (\r
+ IN EFI_PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ UINT8 PhysicalAddressBits;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
+ UINTN IndexOfPdpEntries;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ UINT32 NumberOfPdpEntriesNeeded;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINTN TotalPagesNum;\r
+ UINTN PageAddress;\r
+ UINT64 AddressEncMask;\r
+\r
+ //\r
+ // Make sure AddressEncMask is contained to smallest supported address field\r
+ //\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
+\r
+ PhysicalAddressBits = 32;\r
+\r
+ //\r
+ // Calculate the table entries needed.\r
+ //\r
+ NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));\r
+\r
+ TotalPagesNum = NumberOfPdpEntriesNeeded + 1;\r
+ PageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);\r
+ ASSERT (PageAddress != 0);\r
+\r
+ PageMap = (VOID *) PageAddress;\r
+ PageAddress += SIZE_4KB;\r
+\r
+ PageDirectoryPointerEntry = PageMap;\r
+ PhysicalAddress = 0;\r
+\r
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
+ //\r
+ // Each Directory Pointer entry points to a page of Page Directory entries.\r
+ // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.\r
+ //\r
+ PageDirectoryEntry = (VOID *) PageAddress;\r
+ PageAddress += SIZE_4KB;\r
+\r
+ //\r
+ // Fill in a Page Directory Pointer Entry\r
+ //\r
+ PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;\r
+ PageDirectoryPointerEntry->Bits.Present = 1;\r
+\r
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {\r
+ if ((IsNullDetectionEnabled () && PhysicalAddress == 0)\r
+ || ((PhysicalAddress < StackBase + StackSize)\r
+ && ((PhysicalAddress + SIZE_2MB) > StackBase))) {\r
+ //\r
+ // Need to split this 2M page that covers stack range.\r
+ //\r
+ Split2MPageTo4K (PhysicalAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
+ } else {\r
+ //\r
+ // Fill in the Page Directory entries\r
+ //\r
+ PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress | AddressEncMask;\r
+ PageDirectoryEntry->Bits.ReadWrite = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ }\r
+ }\r
+ }\r
+\r
+ for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {\r
+ ZeroMem (\r
+ PageDirectoryPointerEntry,\r
+ sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)\r
+ );\r
+ }\r
+\r
+ //\r
+ // Protect the page table by marking the memory used for page table to be\r
+ // read-only.\r
+ //\r
+ EnablePageTableProtection ((UINTN)PageMap, FALSE);\r
+\r
+ return (UINTN) PageMap;\r
+}\r
+\r
+/**\r
+ The function will check if IA32 PAE is supported.\r
+\r
+ @retval TRUE IA32 PAE is supported.\r
+ @retval FALSE IA32 PAE is not supported.\r
+\r
+**/\r
+BOOLEAN\r
+IsIa32PaeSupport (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+ BOOLEAN Ia32PaeSupport;\r
+\r
+ Ia32PaeSupport = FALSE;\r
+ AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x1) {\r
+ AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT6) != 0) {\r
+ Ia32PaeSupport = TRUE;\r
+ }\r
+ }\r
+\r
+ return Ia32PaeSupport;\r
+}\r
+\r
+/**\r
+ The function will check if page table should be setup or not.\r
+\r
+ @retval TRUE Page table should be created.\r
+ @retval FALSE Page table should not be created.\r
+\r
+**/\r
+BOOLEAN\r
+ToBuildPageTable (\r
+ VOID\r
+ )\r
+{\r
+ if (!IsIa32PaeSupport ()) {\r
+ return FALSE;\r
+ }\r
+\r
+ if (IsNullDetectionEnabled ()) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (PcdGetBool (PcdCpuStackGuard)) {\r
+ return TRUE;\r
+ }\r
+\r
+ if (IsEnableNonExecNeeded ()) {\r
+ return TRUE;\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ Transfers control to DxeCore.\r
+\r
+ This function performs a CPU architecture specific operations to execute\r
+ the entry point of DxeCore with the parameters of HobList.\r
+ It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.\r
+\r
+ @param DxeCoreEntryPoint The entry point of DxeCore.\r
+ @param HobList The start of HobList passed to DxeCore.\r
+\r
+**/\r
VOID\r
HandOffToDxeCore (\r
IN EFI_PHYSICAL_ADDRESS DxeCoreEntryPoint,\r
- IN EFI_PEI_HOB_POINTERS HobList,\r
- IN EFI_PEI_PPI_DESCRIPTOR *EndOfPeiSignal\r
+ IN EFI_PEI_HOB_POINTERS HobList\r
)\r
{\r
EFI_STATUS Status;\r
VOID *TemplateBase;\r
EFI_PHYSICAL_ADDRESS VectorAddress;\r
UINT32 Index;\r
+ X64_IDT_TABLE *IdtTableForX64;\r
+ EFI_VECTOR_HANDOFF_INFO *VectorInfo;\r
+ EFI_PEI_VECTOR_HANDOFF_INFO_PPI *VectorHandoffInfoPpi;\r
+ BOOLEAN BuildPageTablesIa32Pae;\r
+\r
+ //\r
+ // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.\r
+ //\r
+ if (IsNullDetectionEnabled ()) {\r
+ ClearFirst4KPage (HobList.Raw);\r
+ BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);\r
+ }\r
\r
Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);\r
ASSERT_EFI_ERROR (Status);\r
- \r
+\r
if (FeaturePcdGet(PcdDxeIplSwitchToLongMode)) {\r
//\r
- // Compute the top of the stack we were allocated, which is used to load X64 dxe core. \r
+ // Compute the top of the stack we were allocated, which is used to load X64 dxe core.\r
// Pre-allocate a 32 bytes which confroms to x64 calling convention.\r
//\r
- // The first four parameters to a function are passed in rcx, rdx, r8 and r9. \r
- // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the \r
- // register parameters is reserved on the stack, in case the called function \r
- // wants to spill them; this is important if the function is variadic. \r
+ // The first four parameters to a function are passed in rcx, rdx, r8 and r9.\r
+ // Any further parameters are pushed on the stack. Furthermore, space (4 * 8bytes) for the\r
+ // register parameters is reserved on the stack, in case the called function\r
+ // wants to spill them; this is important if the function is variadic.\r
//\r
TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;\r
\r
//\r
- // X64 Calling Conventions requires that the stack must be aligned to 16 bytes\r
+ // x64 Calling Conventions requires that the stack must be aligned to 16 bytes\r
//\r
TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, 16);\r
\r
//\r
// Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA\r
- // memory, it may be corrupted when copying FV to high-end memory \r
+ // memory, it may be corrupted when copying FV to high-end memory\r
//\r
AsmWriteGdtr (&gGdt);\r
//\r
// Create page table and save PageMapLevel4 to CR3\r
//\r
- PageTables = CreateIdentityMappingPageTables ();\r
+ PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);\r
\r
//\r
- // End of PEI phase singal\r
+ // End of PEI phase signal\r
//\r
- Status = PeiServicesInstallPpi (EndOfPeiSignal);\r
+ PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);\r
+ Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);\r
+ PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);\r
ASSERT_EFI_ERROR (Status);\r
- \r
+\r
+ //\r
+ // Paging might be already enabled. To avoid conflict configuration,\r
+ // disable paging first anyway.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () & (~BIT31));\r
AsmWriteCr3 (PageTables);\r
\r
//\r
// Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.\r
- // \r
+ //\r
UpdateStackHob (BaseOfStack, STACK_SIZE);\r
\r
- if (FeaturePcdGet (PcdDxeIplEnableIdt)) {\r
- SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);\r
- \r
- Status = PeiServicesAllocatePages (\r
- EfiBootServicesData, \r
- EFI_SIZE_TO_PAGES((SizeOfTemplate + sizeof (X64_IDT_GATE_DESCRIPTOR)) * 32), \r
- &VectorAddress\r
- );\r
- \r
- ASSERT_EFI_ERROR (Status);\r
- \r
- IdtTable = (X64_IDT_GATE_DESCRIPTOR *) (UINTN) (VectorAddress + SizeOfTemplate * 32);\r
- for (Index = 0; Index < 32; Index++) {\r
- IdtTable[Index].Ia32IdtEntry.Bits.GateType = 0x8e;\r
- IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;\r
- IdtTable[Index].Ia32IdtEntry.Bits.Selector = SYS_CODE64_SEL;\r
- \r
- IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow = (UINT16) VectorAddress;\r
- IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16) (RShiftU64 (VectorAddress, 16));\r
- IdtTable[Index].Offset32To63 = (UINT32) (RShiftU64 (VectorAddress, 32));\r
- IdtTable[Index].Reserved = 0;\r
- \r
- CopyMem ((VOID *) (UINTN) VectorAddress, TemplateBase, SizeOfTemplate);\r
- AsmVectorFixup ((VOID *) (UINTN) VectorAddress, (UINT8) Index);\r
- \r
- VectorAddress += SizeOfTemplate;\r
- }\r
- \r
- gLidtDescriptor.Base = (UINTN) IdtTable;\r
- AsmWriteIdtr (&gLidtDescriptor);\r
+ SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);\r
+\r
+ Status = PeiServicesAllocatePages (\r
+ EfiBootServicesData,\r
+ EFI_SIZE_TO_PAGES(sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),\r
+ &VectorAddress\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding the IDT so that it\r
+ // can still be retrieved correctly after the IDT register is re-written.\r
+ //\r
+ IdtTableForX64 = (X64_IDT_TABLE *) (UINTN) VectorAddress;\r
+ IdtTableForX64->PeiService = GetPeiServicesTablePointer ();\r
+\r
+ VectorAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) (IdtTableForX64 + 1);\r
+ IdtTable = IdtTableForX64->IdtTable;\r
+ for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {\r
+ IdtTable[Index].Ia32IdtEntry.Bits.GateType = 0x8e;\r
+ IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;\r
+ IdtTable[Index].Ia32IdtEntry.Bits.Selector = SYS_CODE64_SEL;\r
+\r
+ IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow = (UINT16) VectorAddress;\r
+ IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16) (RShiftU64 (VectorAddress, 16));\r
+ IdtTable[Index].Offset32To63 = (UINT32) (RShiftU64 (VectorAddress, 32));\r
+ IdtTable[Index].Reserved = 0;\r
+\r
+ CopyMem ((VOID *) (UINTN) VectorAddress, TemplateBase, SizeOfTemplate);\r
+ AsmVectorFixup ((VOID *) (UINTN) VectorAddress, (UINT8) Index);\r
+\r
+ VectorAddress += SizeOfTemplate;\r
}\r
+\r
+ gLidtDescriptor.Base = (UINTN) IdtTable;\r
+\r
+ //\r
+ // Disable the debug timer interrupt, since the new IDT table cannot handle it.\r
+ //\r
+ SaveAndSetDebugTimerInterrupt (FALSE);\r
+\r
+ AsmWriteIdtr (&gLidtDescriptor);\r
+\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",\r
+ __FUNCTION__,\r
+ BaseOfStack,\r
+ STACK_SIZE\r
+ ));\r
+\r
//\r
- // Go to Long Mode. Interrupts will not get turned on until the CPU AP is loaded.\r
+ // Go to Long Mode and transfer control to DxeCore.\r
+ // Interrupts will not get turned on until the CPU AP is loaded.\r
// Call x64 drivers passing in single argument, a pointer to the HOBs.\r
- // \r
+ //\r
AsmEnablePaging64 (\r
SYS_CODE64_SEL,\r
DxeCoreEntryPoint,\r
TopOfStack\r
);\r
} else {\r
+ //\r
+ // Get Vector Hand-off Info PPI and build Guided HOB\r
+ //\r
+ Status = PeiServicesLocatePpi (\r
+ &gEfiVectorHandoffInfoPpiGuid,\r
+ 0,\r
+ NULL,\r
+ (VOID **)&VectorHandoffInfoPpi\r
+ );\r
+ if (Status == EFI_SUCCESS) {\r
+ DEBUG ((EFI_D_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));\r
+ VectorInfo = VectorHandoffInfoPpi->Info;\r
+ Index = 1;\r
+ while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {\r
+ VectorInfo ++;\r
+ Index ++;\r
+ }\r
+ BuildGuidDataHob (\r
+ &gEfiVectorHandoffInfoPpiGuid,\r
+ VectorHandoffInfoPpi->Info,\r
+ sizeof (EFI_VECTOR_HANDOFF_INFO) * Index\r
+ );\r
+ }\r
+\r
//\r
// Compute the top of the stack we were allocated. Pre-allocate a UINTN\r
// for safety.\r
TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;\r
TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);\r
\r
+ PageTables = 0;\r
+ BuildPageTablesIa32Pae = ToBuildPageTable ();\r
+ if (BuildPageTablesIa32Pae) {\r
+ PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);\r
+ if (IsEnableNonExecNeeded ()) {\r
+ EnableExecuteDisableBit();\r
+ }\r
+ }\r
+\r
//\r
- // End of PEI phase singal\r
+ // End of PEI phase signal\r
//\r
- Status = PeiServicesInstallPpi (EndOfPeiSignal);\r
+ PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);\r
+ Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);\r
+ PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);\r
ASSERT_EFI_ERROR (Status);\r
\r
+ if (BuildPageTablesIa32Pae) {\r
+ //\r
+ // Paging might be already enabled. To avoid conflict configuration,\r
+ // disable paging first anyway.\r
+ //\r
+ AsmWriteCr0 (AsmReadCr0 () & (~BIT31));\r
+ AsmWriteCr3 (PageTables);\r
+ //\r
+ // Set Physical Address Extension (bit 5 of CR4).\r
+ //\r
+ AsmWriteCr4 (AsmReadCr4 () | BIT5);\r
+ }\r
+\r
//\r
// Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.\r
- // \r
+ //\r
UpdateStackHob (BaseOfStack, STACK_SIZE);\r
\r
- SwitchStack (\r
- (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,\r
- HobList.Raw,\r
- NULL,\r
- (VOID *) (UINTN) TopOfStack\r
- );\r
- } \r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",\r
+ __FUNCTION__,\r
+ BaseOfStack,\r
+ STACK_SIZE\r
+ ));\r
+\r
+ //\r
+ // Transfer the control to the entry point of DxeCore.\r
+ //\r
+ if (BuildPageTablesIa32Pae) {\r
+ AsmEnablePaging32 (\r
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,\r
+ HobList.Raw,\r
+ NULL,\r
+ (VOID *) (UINTN) TopOfStack\r
+ );\r
+ } else {\r
+ SwitchStack (\r
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,\r
+ HobList.Raw,\r
+ NULL,\r
+ (VOID *) (UINTN) TopOfStack\r
+ );\r
+ }\r
+ }\r
}\r
\r