UefiCpuPkg: Split the path in RelocateApLoop into two.
author     Xie, Yuanhao <yuanhao.xie@intel.com>
           Wed, 1 Mar 2023 06:09:48 +0000 (14:09 +0800)
committer  mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
           Tue, 7 Mar 2023 08:14:59 +0000 (08:14 +0000)
Add the union RELOCATE_AP_LOOP_ENTRY and split the path in RelocateApLoop
 into two (a minimal sketch of the resulting dispatch follows this list):
 1. 64-bit AMD processors with SEV-ES
 2. Intel processors (32-bit or 64-bit), 32-bit AMD processors, or
    64-bit AMD processors without SEV-ES.
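For illustration only (not part of the commit): a self-contained C sketch of
the union-based dispatch this change introduces. The types below are
simplified stand-ins, not the EDK2 definitions; the real ASM_RELOCATE_AP_LOOP
takes the eight arguments seen in the diff (MwaitSupport, ApTargetCState,
PmCodeSegment, top-of-stack, NumberToFinish, Pm16CodeSegment, SevEsAPBuffer,
WakeupBuffer) and never returns.

    /* Illustrative stand-ins only -- not the EDK2 declarations. Two
       parameters are enough to show the shape of the dispatch.        */
    #include <stdio.h>

    typedef void (*ASM_RELOCATE_AP_LOOP)(int MwaitSupport, unsigned ApTargetCState);

    typedef union {
      void                  *Data;         /* raw view: CopyMem target, NULL check        */
      ASM_RELOCATE_AP_LOOP  AmdSevEntry;   /* 64-bit AMD processors with SEV-ES           */
      ASM_RELOCATE_AP_LOOP  GenericEntry;  /* Intel, 32-bit AMD, 64-bit AMD without SEV-ES */
    } RELOCATE_AP_LOOP_ENTRY;

    /* Stand-in for the relocated assembly AP loop. */
    static void
    ApLoopStub (int MwaitSupport, unsigned ApTargetCState)
    {
      printf ("AP loop: mwait=%d cstate=%u\n", MwaitSupport, ApTargetCState);
    }

    int
    main (void)
    {
      RELOCATE_AP_LOOP_ENTRY  Entry;
      int                     UseSevEsAPMethod = 0;  /* would come from CPU_MP_DATA */

      /* Storing through .Data and calling through a function-pointer member
         is the same type pun the driver relies on (implementation-defined
         in ISO C, supported by common compilers). In the driver, CopyMem
         fills a reserved buffer and .Data points at it.                */
      Entry.Data = (void *)ApLoopStub;

      if (UseSevEsAPMethod) {
        Entry.AmdSevEntry (1, 1);    /* SEV-ES path */
      } else {
        Entry.GenericEntry (1, 1);   /* everything else */
      }

      return 0;
    }

This mirrors the pattern in the diff below: InitMpGlobalData copies the
assembly loop into the reserved buffer via mReservedApLoop.Data, and
RelocateApLoop later invokes it through the union member matching the
processor type.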

Cc: Guo Dong <guo.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james.lu@intel.com>
Cc: Gua Guo <gua.guo@intel.com>
Signed-off-by: Yuanhao Xie <yuanhao.xie@intel.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Tested-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
UefiCpuPkg/Library/MpInitLib/MpLib.h

diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index a84e9e33bafab03cefbd6fdc6883f0b22873a53c..e9ac858f4f902388cbf86e9704397e113079159d 100644
@@ -1,7 +1,7 @@
 /** @file\r
   MP initialize support functions for DXE phase.\r
 \r
-  Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>\r
+  Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>\r
   SPDX-License-Identifier: BSD-2-Clause-Patent\r
 \r
 **/\r
 \r
 #define  AP_SAFE_STACK_SIZE  128\r
 \r
-CPU_MP_DATA       *mCpuMpData                  = NULL;\r
-EFI_EVENT         mCheckAllApsEvent            = NULL;\r
-EFI_EVENT         mMpInitExitBootServicesEvent = NULL;\r
-EFI_EVENT         mLegacyBootEvent             = NULL;\r
-volatile BOOLEAN  mStopCheckAllApsStatus       = TRUE;\r
-VOID              *mReservedApLoopFunc         = NULL;\r
-UINTN             mReservedTopOfApStack;\r
-volatile UINT32   mNumberToFinish = 0;\r
+CPU_MP_DATA             *mCpuMpData                  = NULL;\r
+EFI_EVENT               mCheckAllApsEvent            = NULL;\r
+EFI_EVENT               mMpInitExitBootServicesEvent = NULL;\r
+EFI_EVENT               mLegacyBootEvent             = NULL;\r
+volatile BOOLEAN        mStopCheckAllApsStatus       = TRUE;\r
+RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;\r
+UINTN                   mReservedTopOfApStack;\r
+volatile UINT32         mNumberToFinish = 0;\r
 \r
 //\r
 // Begin wakeup buffer allocation below 0x88000\r
@@ -378,32 +378,46 @@ RelocateApLoop (
   IN OUT VOID  *Buffer\r
   )\r
 {\r
-  CPU_MP_DATA           *CpuMpData;\r
-  BOOLEAN               MwaitSupport;\r
-  ASM_RELOCATE_AP_LOOP  AsmRelocateApLoopFunc;\r
-  UINTN                 ProcessorNumber;\r
-  UINTN                 StackStart;\r
+  CPU_MP_DATA  *CpuMpData;\r
+  BOOLEAN      MwaitSupport;\r
+  UINTN        ProcessorNumber;\r
+  UINTN        StackStart;\r
 \r
   MpInitLibWhoAmI (&ProcessorNumber);\r
   CpuMpData    = GetCpuMpData ();\r
   MwaitSupport = IsMwaitSupport ();\r
   if (CpuMpData->UseSevEsAPMethod) {\r
+    //\r
+    // 64-bit AMD processors with SEV-ES\r
+    //\r
     StackStart = CpuMpData->SevEsAPResetStackStart;\r
+    mReservedApLoop.AmdSevEntry (\r
+                      MwaitSupport,\r
+                      CpuMpData->ApTargetCState,\r
+                      CpuMpData->PmCodeSegment,\r
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
+                      (UINTN)&mNumberToFinish,\r
+                      CpuMpData->Pm16CodeSegment,\r
+                      CpuMpData->SevEsAPBuffer,\r
+                      CpuMpData->WakeupBuffer\r
+                      );\r
   } else {\r
+    //\r
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES\r
+    //\r
     StackStart = mReservedTopOfApStack;\r
+    mReservedApLoop.GenericEntry (\r
+                      MwaitSupport,\r
+                      CpuMpData->ApTargetCState,\r
+                      CpuMpData->PmCodeSegment,\r
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
+                      (UINTN)&mNumberToFinish,\r
+                      CpuMpData->Pm16CodeSegment,\r
+                      CpuMpData->SevEsAPBuffer,\r
+                      CpuMpData->WakeupBuffer\r
+                      );\r
   }\r
 \r
-  AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;\r
-  AsmRelocateApLoopFunc (\r
-    MwaitSupport,\r
-    CpuMpData->ApTargetCState,\r
-    CpuMpData->PmCodeSegment,\r
-    StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,\r
-    (UINTN)&mNumberToFinish,\r
-    CpuMpData->Pm16CodeSegment,\r
-    CpuMpData->SevEsAPBuffer,\r
-    CpuMpData->WakeupBuffer\r
-    );\r
   //\r
   // It should never reach here\r
   //\r
@@ -547,8 +561,8 @@ InitMpGlobalData (
                    );\r
   ASSERT_EFI_ERROR (Status);\r
 \r
-  mReservedApLoopFunc = (VOID *)(UINTN)Address;\r
-  ASSERT (mReservedApLoopFunc != NULL);\r
+  mReservedApLoop.Data = (VOID *)(UINTN)Address;\r
+  ASSERT (mReservedApLoop.Data != NULL);\r
 \r
   //\r
   // Make sure that the buffer memory is executable if NX protection is enabled\r
@@ -583,7 +597,7 @@ InitMpGlobalData (
   mReservedTopOfApStack = (UINTN)Address + ApSafeBufferSize;\r
   ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);\r
   CopyMem (\r
-    mReservedApLoopFunc,\r
+    mReservedApLoop.Data,\r
     CpuMpData->AddressMap.RelocateApLoopFuncAddress,\r
     CpuMpData->AddressMap.RelocateApLoopFuncSize\r
     );\r
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index a73a89d2a53dab6554ab9d6cd700b5bcb7d058ca..81a95733fca74647ffb170b20c8365948e1c3c85 100644
@@ -402,6 +402,12 @@ AsmExchangeRole (
   IN CPU_EXCHANGE_ROLE_INFO  *OthersInfo\r
   );\r
 \r
+typedef union {\r
+  VOID                    *Data;\r
+  ASM_RELOCATE_AP_LOOP    AmdSevEntry;  // 64-bit AMD Sev processors\r
+  ASM_RELOCATE_AP_LOOP    GenericEntry; // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or AMD non-Sev processors\r
+} RELOCATE_AP_LOOP_ENTRY;\r
+\r
 /**\r
   Get the pointer to CPU MP Data structure.\r
 \r