Add the union RELOCATE_AP_LOOP_ENTRY and split the path in
RelocateApLoop into two:
1. 64-bit AMD processors with SEV-ES
2. Intel processors (32-bit or 64-bit), 32-bit AMD processors, or
   64-bit AMD processors without SEV-ES.
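
For illustration only (not part of the patch: the DEMO_* names and
DemoLoop below are made up, and the function/object pointer punning is
the same common compiler extension the real code relies on), a minimal
sketch of how such a union lets one buffer be filled as raw data and
then called through either typed entry:

  #include <stdio.h>

  typedef void (*DEMO_ASM_RELOCATE_AP_LOOP)(int MwaitSupport);

  typedef union {
    void                       *Data;         /* raw view for copying the loop code */
    DEMO_ASM_RELOCATE_AP_LOOP  AmdSevEntry;   /* 64-bit AMD processors with SEV-ES */
    DEMO_ASM_RELOCATE_AP_LOOP  GenericEntry;  /* all other processors */
  } DEMO_RELOCATE_AP_LOOP_ENTRY;

  static void
  DemoLoop (int MwaitSupport)
  {
    printf ("AP loop entered, MwaitSupport=%d\n", MwaitSupport);
  }

  int
  main (void)
  {
    DEMO_RELOCATE_AP_LOOP_ENTRY  Entry;
    int                          UseSevEsAPMethod;

    UseSevEsAPMethod = 0;
    /* Store the buffer address once through the raw view ... */
    Entry.Data = (void *)DemoLoop;
    /* ... then dispatch through whichever typed view matches the platform. */
    if (UseSevEsAPMethod) {
      Entry.AmdSevEntry (1);
    } else {
      Entry.GenericEntry (1);
    }
    return 0;
  }

Both entries share one signature today, so the split mainly documents
at each call site which of the two paths is taken, while Data keeps a
plain pointer view for the CopyMem into the reserved buffer.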
Cc: Guo Dong <guo.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james.lu@intel.com>
Cc: Gua Guo <gua.guo@intel.com>
Signed-off-by: Yuanhao Xie <yuanhao.xie@intel.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Tested-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
/** @file\r
MP initialize support functions for DXE phase.\r
\r
- Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>\r
+ Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>\r
SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
#define AP_SAFE_STACK_SIZE  128\r
\r
-CPU_MP_DATA       *mCpuMpData                  = NULL;\r
-EFI_EVENT         mCheckAllApsEvent            = NULL;\r
-EFI_EVENT         mMpInitExitBootServicesEvent = NULL;\r
-EFI_EVENT         mLegacyBootEvent             = NULL;\r
-volatile BOOLEAN  mStopCheckAllApsStatus       = TRUE;\r
-VOID              *mReservedApLoopFunc         = NULL;\r
-UINTN             mReservedTopOfApStack;\r
-volatile UINT32   mNumberToFinish = 0;\r
+CPU_MP_DATA             *mCpuMpData                  = NULL;\r
+EFI_EVENT               mCheckAllApsEvent            = NULL;\r
+EFI_EVENT               mMpInitExitBootServicesEvent = NULL;\r
+EFI_EVENT               mLegacyBootEvent             = NULL;\r
+volatile BOOLEAN        mStopCheckAllApsStatus       = TRUE;\r
+RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;\r
+UINTN                   mReservedTopOfApStack;\r
+volatile UINT32         mNumberToFinish = 0;\r
\r
//\r
// Begin wakeup buffer allocation below 0x88000\r
\r
IN OUT VOID *Buffer\r
)\r
{\r
-  CPU_MP_DATA           *CpuMpData;\r
-  BOOLEAN               MwaitSupport;\r
-  ASM_RELOCATE_AP_LOOP  AsmRelocateApLoopFunc;\r
-  UINTN                 ProcessorNumber;\r
-  UINTN                 StackStart;\r
+  CPU_MP_DATA  *CpuMpData;\r
+  BOOLEAN      MwaitSupport;\r
+  UINTN        ProcessorNumber;\r
+  UINTN        StackStart;\r
\r
  MpInitLibWhoAmI (&ProcessorNumber);\r
  CpuMpData    = GetCpuMpData ();\r
  MwaitSupport = IsMwaitSupport ();\r
  if (CpuMpData->UseSevEsAPMethod) {\r
+    //\r
+    // 64-bit AMD processors with SEV-ES\r
+    //\r
    StackStart = CpuMpData->SevEsAPResetStackStart;\r
+    mReservedApLoop.AmdSevEntry (\r
+      MwaitSupport,\r
+      CpuMpData->ApTargetCState,\r
+      CpuMpData->PmCodeSegment,\r
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
+      (UINTN)&mNumberToFinish,\r
+      CpuMpData->Pm16CodeSegment,\r
+      CpuMpData->SevEsAPBuffer,\r
+      CpuMpData->WakeupBuffer\r
+      );\r
  } else {\r
+    //\r
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES\r
+    //\r
    StackStart = mReservedTopOfApStack;\r
+    mReservedApLoop.GenericEntry (\r
+      MwaitSupport,\r
+      CpuMpData->ApTargetCState,\r
+      CpuMpData->PmCodeSegment,\r
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
+      (UINTN)&mNumberToFinish,\r
+      CpuMpData->Pm16CodeSegment,\r
+      CpuMpData->SevEsAPBuffer,\r
+      CpuMpData->WakeupBuffer\r
+      );\r
  }\r
\r
-  AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;\r
-  AsmRelocateApLoopFunc (\r
-    MwaitSupport,\r
-    CpuMpData->ApTargetCState,\r
-    CpuMpData->PmCodeSegment,\r
-    StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
-    (UINTN)&mNumberToFinish,\r
-    CpuMpData->Pm16CodeSegment,\r
-    CpuMpData->SevEsAPBuffer,\r
-    CpuMpData->WakeupBuffer\r
-    );\r
//\r
// It should never reach here\r
//\r
);\r
ASSERT_EFI_ERROR (Status);\r
\r
- mReservedApLoopFunc = (VOID *)(UINTN)Address;\r
- ASSERT (mReservedApLoopFunc != NULL);\r
+ mReservedApLoop.Data = (VOID *)(UINTN)Address;\r
+ ASSERT (mReservedApLoop.Data != NULL);\r
\r
//\r
// Make sure that the buffer memory is executable if NX protection is enabled\r
\r
mReservedTopOfApStack = (UINTN)Address + ApSafeBufferSize;\r
ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);\r
CopyMem (\r
-    mReservedApLoopFunc,\r
+    mReservedApLoop.Data,\r
CpuMpData->AddressMap.RelocateApLoopFuncAddress,\r
CpuMpData->AddressMap.RelocateApLoopFuncSize\r
);\r
IN CPU_EXCHANGE_ROLE_INFO *OthersInfo\r
);\r
\r
+typedef union {\r
+  VOID                    *Data;\r
+  ASM_RELOCATE_AP_LOOP    AmdSevEntry;  // 64-bit AMD Sev processors\r
+  ASM_RELOCATE_AP_LOOP    GenericEntry; // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or AMD non-Sev processors\r
+} RELOCATE_AP_LOOP_ENTRY;\r
+\r
/**\r
Get the pointer to CPU MP Data structure.\r
\r