/** @file\r
Code for Processor S3 restoration\r
\r
-Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
\r
#include "PiSmmCpuDxeSmm.h"\r
\r
+#pragma pack(1)\r
typedef struct {\r
UINTN Lock;\r
VOID *StackStart;\r
IA32_DESCRIPTOR IdtrProfile;\r
UINT32 BufferStart;\r
UINT32 Cr3;\r
+ UINTN InitializeFloatingPointUnitsAddress;\r
} MP_CPU_EXCHANGE_INFO;\r
+#pragma pack()\r
\r
typedef struct {\r
UINT8 *RendezvousFunnelAddress;\r
UINTN LongJumpOffset;\r
} MP_ASSEMBLY_ADDRESS_MAP;\r
\r
+//\r
+// Flags used when programming the registers.\r
+//\r
+typedef struct {\r
+ volatile UINTN ConsoleLogLock; // Spinlock used to serialize console output.\r
+ volatile UINTN MemoryMappedLock; // Spinlock used to program MMIO registers.\r
+ volatile UINT32 *SemaphoreCount; // Semaphore counters used to synchronize register programming.\r
+} PROGRAM_CPU_REGISTER_FLAGS;\r
+\r
+//\r
+// Signal that SMM BASE relocation is complete.\r
+//\r
+volatile BOOLEAN mInitApsAfterSmmBaseReloc;\r
+\r
/**\r
Get starting address and size of the rendezvous entry for APs.\r
Information for fixing a jump instruction in the code is also returned.\r
#define LEGACY_REGION_SIZE (2 * 0x1000)\r
#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)\r
\r
+PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;\r
ACPI_CPU_DATA mAcpiCpuData;\r
-UINT32 mNumberToFinish;\r
+volatile UINT32 mNumberToFinish;\r
MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r
BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r
-VOID *mGdtForAp = NULL;\r
-VOID *mIdtForAp = NULL;\r
-VOID *mMachineCheckHandlerForAp = NULL;\r
-MP_MSR_LOCK *mMsrSpinLocks = NULL;\r
-UINTN mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;\r
-UINTN mMsrCount = 0;\r
-\r
-/**\r
- Get MSR spin lock by MSR index.\r
\r
- @param MsrIndex MSR index value.\r
-\r
- @return Pointer to MSR spin lock.\r
-\r
-**/\r
-SPIN_LOCK *\r
-GetMsrSpinLockByIndex (\r
- IN UINT32 MsrIndex\r
- )\r
-{\r
- UINTN Index;\r
- for (Index = 0; Index < mMsrCount; Index++) {\r
- if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {\r
- return &mMsrSpinLocks[Index].SpinLock;\r
- }\r
- }\r
- return NULL;\r
-}\r
+//\r
+// S3 boot flag\r
+//\r
+BOOLEAN mSmmS3Flag = FALSE;\r
\r
-/**\r
- Initialize MSR spin lock by MSR index.\r
+//\r
+// Pointer to structure used during S3 Resume\r
+//\r
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
\r
- @param MsrIndex MSR index value.\r
+BOOLEAN mAcpiS3Enable = TRUE;\r
\r
-**/\r
-VOID\r
-InitMsrSpinLockByIndex (\r
- IN UINT32 MsrIndex\r
- )\r
-{\r
- UINTN NewMsrSpinLockCount;\r
+UINT8 *mApHltLoopCode = NULL;\r
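+//\r
+// Safe-code stub that each AP executes at the end of the S3 path: it loads the\r
+// pointer passed on the stack, atomically decrements the 32-bit counter that the\r
+// pointer references, and then parks in a cli/hlt loop.\r
+//\r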
+UINT8 mApHltLoopCodeTemplate[] = {\r
+ 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]\r
+ 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]\r
+ 0xFA, // cli\r
+ 0xF4, // hlt\r
+ 0xEB, 0xFC // jmp $-2\r
+ };\r
\r
- if (mMsrSpinLocks == NULL) {\r
- mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);\r
- ASSERT (mMsrSpinLocks != NULL);\r
- }\r
- if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {\r
- //\r
- // Initialize spin lock for MSR programming\r
- //\r
- mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;\r
- InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);\r
- mMsrCount ++;\r
- if (mMsrCount == mMsrSpinLockCount) {\r
- //\r
- // If MSR spin lock buffer is full, enlarge it\r
- //\r
- NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;\r
- mMsrSpinLocks = ReallocatePool (\r
- sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,\r
- sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,\r
- mMsrSpinLocks\r
- );\r
- mMsrSpinLockCount = NewMsrSpinLockCount;\r
- }\r
- }\r
-}\r
+CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };\r
\r
/**\r
Sync up the MTRR values for all processors.\r
}\r
\r
/**\r
- Programs registers for the calling processor.\r
+ Increment the semaphore by 1.\r
+\r
+ @param Sem IN: 32-bit unsigned integer\r
+\r
+**/\r
+VOID\r
+S3ReleaseSemaphore (\r
+ IN OUT volatile UINT32 *Sem\r
+ )\r
+{\r
+ InterlockedIncrement (Sem);\r
+}\r
+\r
+/**\r
+ Decrement the semaphore by 1 if it is not zero.\r
\r
- This function programs registers for the calling processor.\r
+ Performs an atomic decrement operation on the semaphore.\r
+ The compare exchange operation must be performed using\r
+ MP safe mechanisms.\r
\r
- @param RegisterTable Pointer to register table of the running processor.\r
+ @param Sem IN: 32-bit unsigned integer\r
\r
**/\r
VOID\r
-SetProcessorRegister (\r
- IN CPU_REGISTER_TABLE *RegisterTable\r
+S3WaitForSemaphore (\r
+ IN OUT volatile UINT32 *Sem\r
+ )\r
+{\r
+ UINT32 Value;\r
+\r
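+ //\r
+ // Spin until the semaphore is non-zero, then try to decrement it atomically.\r
+ // InterlockedCompareExchange32() returns the original value, so the decrement\r
+ // has succeeded only when the returned value equals the value read above.\r
+ //\r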
+ do {\r
+ Value = *Sem;\r
+ } while (Value == 0 ||\r
+ InterlockedCompareExchange32 (\r
+ Sem,\r
+ Value,\r
+ Value - 1\r
+ ) != Value);\r
+}\r
+\r
+/**\r
+ Initialize the CPU registers from a register table.\r
+\r
+ @param[in] RegisterTable The register table for this AP.\r
+ @param[in] ApLocation AP location info for this AP.\r
+ @param[in] CpuStatus CPU status info for this CPU.\r
+ @param[in] CpuFlags Flags data structure used when programming the registers.\r
+\r
+ @note This service could be called by BSP/APs.\r
+**/\r
+VOID\r
+ProgramProcessorRegister (\r
+ IN CPU_REGISTER_TABLE *RegisterTable,\r
+ IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,\r
+ IN CPU_STATUS_INFORMATION *CpuStatus,\r
+ IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags\r
)\r
{\r
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
UINTN Index;\r
UINTN Value;\r
- SPIN_LOCK *MsrSpinLock;\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;\r
+ volatile UINT32 *SemaphorePtr;\r
+ UINT32 FirstThread;\r
+ UINT32 PackageThreadsCount;\r
+ UINT32 CurrentThread;\r
+ UINTN ProcessorIndex;\r
+ UINTN ThreadIndex;\r
+ UINTN ValidThreadCount;\r
+ UINT32 *ValidCoreCountPerPackage;\r
\r
//\r
// Traverse Register Table of this logical processor\r
//\r
- RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;\r
- for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {\r
+ RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;\r
+\r
+ for (Index = 0; Index < RegisterTable->TableLength; Index++) {\r
+\r
+ RegisterTableEntry = &RegisterTableEntryHead[Index];\r
+\r
+ DEBUG_CODE_BEGIN ();\r
+ if (ApLocation != NULL) {\r
+ AcquireSpinLock (&CpuFlags->ConsoleLogLock);\r
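+ //\r
+ // Flatten the (Package, Core, Thread) location into a zero-based linear\r
+ // processor index using the maximum core and thread counts from CpuStatus.\r
+ //\r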
+ ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +\r
+ ApLocation->Core * CpuStatus->MaxThreadCount +\r
+ ApLocation->Thread;\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "Processor = %lu, Entry Index %lu, Type = %s!\n",\r
+ (UINT64)ThreadIndex,\r
+ (UINT64)Index,\r
+ mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]\r
+ ));\r
+ ReleaseSpinLock (&CpuFlags->ConsoleLogLock);\r
+ }\r
+ DEBUG_CODE_END ();\r
+\r
//\r
// Check the type of specified register\r
//\r
RegisterTableEntry->Value\r
);\r
} else {\r
- //\r
- // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode\r
- // to make sure MSR read/write operation is atomic.\r
- //\r
- MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);\r
- AcquireSpinLock (MsrSpinLock);\r
//\r
// Set the bit section according to bit start and length\r
//\r
RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
RegisterTableEntry->Value\r
);\r
- ReleaseSpinLock (MsrSpinLock);\r
}\r
break;\r
//\r
+ // MemoryMapped operations\r
+ //\r
+ case MemoryMapped:\r
+ AcquireSpinLock (&CpuFlags->MemoryMappedLock);\r
+ MmioBitFieldWrite32 (\r
+ (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ (UINT32)RegisterTableEntry->Value\r
+ );\r
+ ReleaseSpinLock (&CpuFlags->MemoryMappedLock);\r
+ break;\r
+ //\r
// Enable or disable cache\r
//\r
case CacheControl:\r
}\r
break;\r
\r
+ case Semaphore:\r
+ // The semaphore logic works as shown below:\r
+ //\r
+ // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);\r
+ // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);\r
+ //\r
+ // All threads (T0...Tn) wait at the P() line and then continue running\r
+ // together.\r
+ //\r
+ //\r
+ // T0 T1 ... Tn\r
+ //\r
+ // V(0...n) V(0...n) ... V(0...n)\r
+ // n * P(0) n * P(1) ... n * P(n)\r
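+ //\r
+ // For example, with two threads (T0 and T1), each thread posts V() once to\r
+ // T0's and once to T1's semaphore and then waits on its own semaphore twice,\r
+ // so it proceeds only after both threads have checked in.\r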
+ //\r
+ ASSERT (\r
+ (ApLocation != NULL) &&\r
+ (CpuStatus->ValidCoreCountPerPackage != 0) &&\r
+ (CpuFlags->SemaphoreCount) != NULL\r
+ );\r
+ SemaphorePtr = CpuFlags->SemaphoreCount;\r
+ switch (RegisterTableEntry->Value) {\r
+ case CoreDepType:\r
+ //\r
+ // Get the offset of the first thread in the core to which the current thread belongs.\r
+ //\r
+ FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;\r
+ CurrentThread = FirstThread + ApLocation->Thread;\r
+ //\r
+ // First, notify all threads in the current core that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+ //\r
+ // Second, wait until all valid threads in the current core are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
+ break;\r
+\r
+ case PackageDepType:\r
+ ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;\r
+ //\r
+ // Get the offset of the first thread in the package to which the current thread belongs.\r
+ //\r
+ FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;\r
+ //\r
+ // Get the maximum possible thread count for the current package.\r
+ //\r
+ PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;\r
+ CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;\r
+ //\r
+ // Get the valid thread count for the current package.\r
+ //\r
+ ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];\r
+\r
+ //\r
+ // Different packages may contain different numbers of valid cores. If the driver\r
+ // tracked the exact core count of every package, the logic would become much more\r
+ // complicated, so it simply records the maximum core count across all packages and\r
+ // uses it as the expected core count for every package.\r
+ // In the two steps below, the current thread first releases the semaphore of every\r
+ // possible thread in the current package; some of those threads may not be valid,\r
+ // but that does not matter. Second, the current thread waits on its own semaphore\r
+ // once per valid thread in the package, because only the valid threads release the\r
+ // semaphore for this thread.\r
+ //\r
+\r
+ //\r
+ // First, notify all threads in the current package that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+ //\r
+ // Second, wait until all valid threads in the current package are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+ break;\r
+\r
default:\r
break;\r
}\r
}\r
\r
/**\r
- AP initialization before SMBASE relocation in the S3 boot path.\r
+\r
+ Set the processor registers for one AP.\r
+\r
+ @param PreSmmRegisterTable TRUE to use the pre-SMM-init register table; FALSE to use the register table.\r
+\r
**/\r
VOID\r
-EarlyMPRendezvousProcedure (\r
- VOID\r
+SetRegister (\r
+ IN BOOLEAN PreSmmRegisterTable\r
)\r
{\r
- CPU_REGISTER_TABLE *RegisterTableList;\r
- UINT32 InitApicId;\r
- UINTN Index;\r
+ CPU_REGISTER_TABLE *RegisterTable;\r
+ CPU_REGISTER_TABLE *RegisterTables;\r
+ UINT32 InitApicId;\r
+ UINTN ProcIndex;\r
+ UINTN Index;\r
\r
- LoadMtrrData (mAcpiCpuData.MtrrTable);\r
+ if (PreSmmRegisterTable) {\r
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;\r
+ } else {\r
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;\r
+ }\r
\r
- //\r
- // Find processor number for this CPU.\r
- //\r
- RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;\r
InitApicId = GetInitialApicId ();\r
+ RegisterTable = NULL;\r
for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
- if (RegisterTableList[Index].InitialApicId == InitApicId) {\r
- SetProcessorRegister (&RegisterTableList[Index]);\r
+ if (RegisterTables[Index].InitialApicId == InitApicId) {\r
+ RegisterTable = &RegisterTables[Index];\r
+ ProcIndex = Index;\r
break;\r
}\r
}\r
-\r
- //\r
- // Count down the number with lock mechanism.\r
- //\r
- InterlockedDecrement (&mNumberToFinish);\r
+ ASSERT (RegisterTable != NULL);\r
+\r
+ if (mAcpiCpuData.ApLocation != 0) {\r
+ ProgramProcessorRegister (\r
+ RegisterTable,\r
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,\r
+ &mAcpiCpuData.CpuStatus,\r
+ &mCpuFlags\r
+ );\r
+ } else {\r
+ ProgramProcessorRegister (\r
+ RegisterTable,\r
+ NULL,\r
+ &mAcpiCpuData.CpuStatus,\r
+ &mCpuFlags\r
+ );\r
+ }\r
}\r
\r
/**\r
- AP initialization after SMBASE relocation in the S3 boot path.\r
+ AP initialization before and after SMBASE relocation in the S3 boot path.\r
**/\r
VOID\r
-MPRendezvousProcedure (\r
+InitializeAp (\r
VOID\r
)\r
{\r
- CPU_REGISTER_TABLE *RegisterTableList;\r
- UINT32 InitApicId;\r
- UINTN Index;\r
+ UINTN TopOfStack;\r
+ UINT8 Stack[128];\r
\r
- ProgramVirtualWireMode ();\r
- DisableLvtInterrupts ();\r
+ LoadMtrrData (mAcpiCpuData.MtrrTable);\r
\r
- RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;\r
- InitApicId = GetInitialApicId ();\r
- for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
- if (RegisterTableList[Index].InitialApicId == InitApicId) {\r
- SetProcessorRegister (&RegisterTableList[Index]);\r
- break;\r
- }\r
- }\r
+ SetRegister (TRUE);\r
\r
//\r
// Count down the number with lock mechanism.\r
//\r
InterlockedDecrement (&mNumberToFinish);\r
+\r
+ //\r
+ // Wait for BSP to signal SMM Base relocation done.\r
+ //\r
+ while (!mInitApsAfterSmmBaseReloc) {\r
+ CpuPause ();\r
+ }\r
+\r
+ ProgramVirtualWireMode ();\r
+ DisableLvtInterrupts ();\r
+\r
+ SetRegister (FALSE);\r
+\r
+ //\r
+ // Place the AP into the safe code and count down the number with a lock mechanism in that code.\r
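+ // The 128-byte local Stack only needs to cover the hlt-loop stub; its top is\r
+ // rounded down to CPU_STACK_ALIGNMENT before the AP switches onto it.\r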
+ //\r
+ TopOfStack = (UINTN) Stack + sizeof (Stack);\r
+ TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
+ CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
+ TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
}\r
\r
/**\r
CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
\r
- //\r
- // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory\r
- //\r
- CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);\r
- CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);\r
- CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);\r
-\r
mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;\r
mExchangeInfo->StackSize = mAcpiCpuData.StackSize;\r
mExchangeInfo->BufferStart = (UINT32) StartupVector;\r
mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());\r
+ mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;\r
}\r
\r
/**\r
\r
**/\r
VOID\r
-EarlyInitializeCpu (\r
+InitializeCpuBeforeRebase (\r
VOID\r
)\r
{\r
- CPU_REGISTER_TABLE *RegisterTableList;\r
- UINT32 InitApicId;\r
- UINTN Index;\r
-\r
LoadMtrrData (mAcpiCpuData.MtrrTable);\r
\r
- //\r
- // Find processor number for this CPU.\r
- //\r
- RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;\r
- InitApicId = GetInitialApicId ();\r
- for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
- if (RegisterTableList[Index].InitialApicId == InitApicId) {\r
- SetProcessorRegister (&RegisterTableList[Index]);\r
- break;\r
- }\r
- }\r
+ SetRegister (TRUE);\r
\r
ProgramVirtualWireMode ();\r
\r
PrepareApStartupVector (mAcpiCpuData.StartupVector);\r
\r
mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r
- mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;\r
+ mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;\r
+\r
+ //\r
+ // Execute the code that runs before SMM base relocation. Note: this flag is\r
+ // maintained across S3 boots, so it must be cleared here.\r
+ //\r
+ mInitApsAfterSmmBaseReloc = FALSE;\r
\r
//\r
// Send INIT IPI - SIPI to all APs\r
\r
**/\r
VOID\r
-InitializeCpu (\r
+InitializeCpuAfterRebase (\r
VOID\r
)\r
{\r
- CPU_REGISTER_TABLE *RegisterTableList;\r
- UINT32 InitApicId;\r
- UINTN Index;\r
+ mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r
\r
- RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;\r
- InitApicId = GetInitialApicId ();\r
- for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
- if (RegisterTableList[Index].InitialApicId == InitApicId) {\r
- SetProcessorRegister (&RegisterTableList[Index]);\r
- break;\r
+ //\r
+ // Signal the APs that SMM base relocation is complete so they continue their initialization.\r
+ //\r
+ mInitApsAfterSmmBaseReloc = TRUE;\r
+\r
+ //\r
+ // Register programming must not begin until all APs have continued their\r
+ // initialization. This is required to support the semaphore mechanism in the\r
+ // register table: if a semaphore has package-type dependence, it waits for all\r
+ // APs in one package to finish their work before the next register is set for\r
+ // any AP. If the APs have not started their work while the BSP is doing its\r
+ // own, the BSP thread hangs waiting for the other APs in the same package to\r
+ // finish.\r
+ //\r
+ SetRegister (FALSE);\r
+\r
+ while (mNumberToFinish > 0) {\r
+ CpuPause ();\r
+ }\r
+}\r
+\r
+/**\r
+ Restore SMM Configuration in S3 boot path.\r
+\r
+**/\r
+VOID\r
+RestoreSmmConfigurationInS3 (\r
+ VOID\r
+ )\r
+{\r
+ if (!mAcpiS3Enable) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Restore SMM Configuration in S3 boot path.\r
+ //\r
+ if (mRestoreSmmConfigurationInS3) {\r
+ //\r
+ // Make sure gSmst is correct because the functions below may use it.\r
+ //\r
+ gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
+ gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
+ gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
+ gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
+\r
+ //\r
+ // Configure SMM Code Access Check feature if available.\r
+ //\r
+ ConfigSmmCodeAccessCheck ();\r
+\r
+ SmmCpuFeaturesCompleteSmmReadyToLock ();\r
+\r
+ mRestoreSmmConfigurationInS3 = FALSE;\r
+ }\r
+}\r
+\r
+/**\r
+ Perform SMM initialization for all processors in the S3 boot path.\r
+\r
+ For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmmRestoreCpu (\r
+ VOID\r
+ )\r
+{\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ IA32_DESCRIPTOR Ia32Idtr;\r
+ IA32_DESCRIPTOR X64Idtr;\r
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
+ EFI_STATUS Status;\r
+\r
+ DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
+\r
+ mSmmS3Flag = TRUE;\r
+\r
+ //\r
+ // See if there is enough context to resume PEI Phase\r
+ //\r
+ if (mSmmS3ResumeState == NULL) {\r
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ SmmS3ResumeState = mSmmS3ResumeState;\r
+ ASSERT (SmmS3ResumeState != NULL);\r
+\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
+ //\r
+ // Save the IA32 IDT Descriptor\r
+ //\r
+ AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+\r
+ //\r
+ // Setup X64 IDT table\r
+ //\r
+ ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
+ X64Idtr.Base = (UINTN) IdtEntryTable;\r
+ X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
+\r
+ //\r
+ // Setup the default exception handler\r
+ //\r
+ Status = InitializeCpuExceptionHandlers (NULL);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Initialize Debug Agent to support source level debug\r
+ //\r
+ InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
+ }\r
+\r
+ //\r
+ // Skip initialization if mAcpiCpuData is not valid\r
+ //\r
+ if (mAcpiCpuData.NumberOfCpus > 0) {\r
+ //\r
+ // First time microcode load and restore MTRRs\r
+ //\r
+ InitializeCpuBeforeRebase ();\r
+ }\r
+\r
+ //\r
+ // Restore SMBASE for BSP and all APs\r
+ //\r
+ SmmRelocateBases ();\r
+\r
+ //\r
+ // Skip initialization if mAcpiCpuData is not valid\r
+ //\r
+ if (mAcpiCpuData.NumberOfCpus > 0) {\r
+ //\r
+ // Restore MSRs for BSP and all APs\r
+ //\r
+ InitializeCpuAfterRebase ();\r
+ }\r
+\r
+ //\r
+ // Set a flag to restore SMM configuration in S3 path.\r
+ //\r
+ mRestoreSmmConfigurationInS3 = TRUE;\r
+\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
+\r
+ //\r
+ // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
+ //\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
+ DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
+\r
+ SwitchStack (\r
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
+ );\r
+ }\r
+\r
+ //\r
+ // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
+ //\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
+ DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
+ //\r
+ // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.\r
+ //\r
+ SaveAndSetDebugTimerInterrupt (FALSE);\r
+ //\r
+ // Restore IA32 IDT table\r
+ //\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmDisablePaging64 (\r
+ SmmS3ResumeState->ReturnCs,\r
+ (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
+ (UINT32)SmmS3ResumeState->ReturnContext1,\r
+ (UINT32)SmmS3ResumeState->ReturnContext2,\r
+ (UINT32)SmmS3ResumeState->ReturnStackPointer\r
+ );\r
+ }\r
+\r
+ //\r
+ // Cannot resume PEI Phase\r
+ //\r
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ CpuDeadLoop ();\r
+}\r
+\r
+/**\r
+ Initialize SMM S3 resume state structure used during S3 Resume.\r
+\r
+ @param[in] Cr3 The base address of the page tables to use in SMM.\r
+\r
+**/\r
+VOID\r
+InitSmmS3ResumeState (\r
+ IN UINT32 Cr3\r
+ )\r
+{\r
+ VOID *GuidHob;\r
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+ EFI_STATUS Status;\r
+\r
+ if (!mAcpiS3Enable) {\r
+ return;\r
+ }\r
+\r
+ GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
+ if (GuidHob == NULL) {\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",\r
+ __FUNCTION__,\r
+ &gEfiAcpiVariableGuid\r
+ ));\r
+ CpuDeadLoop ();\r
+ } else {\r
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
+\r
+ DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
+ DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
+\r
+ SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
+ ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
+\r
+ mSmmS3ResumeState = SmmS3ResumeState;\r
+ SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
+\r
+ SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
+\r
+ SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
+ SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
+ if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
+ SmmS3ResumeState->SmmS3StackSize = 0;\r
+ }\r
+\r
+ SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;\r
+ SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
+ SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;\r
+\r
+ if (sizeof (UINTN) == sizeof (UINT64)) {\r
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
+ }\r
+ if (sizeof (UINTN) == sizeof (UINT32)) {\r
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
}\r
+\r
+ //\r
+ // Patch SmmS3ResumeState->SmmS3Cr3\r
+ //\r
+ InitSmmS3Cr3 ();\r
}\r
\r
- mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r
//\r
- // StackStart was updated when APs were waken up in EarlyInitializeCpu.\r
- // Re-initialize StackAddress to original beginning address.\r
+ // Allocate safe memory in ACPI NVS for the APs to execute the hlt loop in\r
+ // protected mode on the S3 path.\r
//\r
- mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;\r
- mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;\r
+ Address = BASE_4GB - 1;\r
+ Status = gBS->AllocatePages (\r
+ AllocateMaxAddress,\r
+ EfiACPIMemoryNVS,\r
+ EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),\r
+ &Address\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+ mApHltLoopCode = (UINT8 *) (UINTN) Address;\r
+}\r
+\r
+/**\r
+ Copy register table from ACPI NVS memory into SMRAM.\r
+\r
+ @param[in] DestinationRegisterTableList Points to destination register table.\r
+ @param[in] SourceRegisterTableList Points to source register table.\r
+ @param[in] NumberOfCpus Number of CPUs.\r
+\r
+**/\r
+VOID\r
+CopyRegisterTable (\r
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
+ IN UINT32 NumberOfCpus\r
+ )\r
+{\r
+ UINTN Index;\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
+\r
+ CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ for (Index = 0; Index < NumberOfCpus; Index++) {\r
+ if (DestinationRegisterTableList[Index].AllocatedSize != 0) {\r
+ RegisterTableEntry = AllocateCopyPool (\r
+ DestinationRegisterTableList[Index].AllocatedSize,\r
+ (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
+ );\r
+ ASSERT (RegisterTableEntry != NULL);\r
+ DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Get ACPI CPU data.\r
+\r
+**/\r
+VOID\r
+GetAcpiCpuData (\r
+ VOID\r
+ )\r
+{\r
+ ACPI_CPU_DATA *AcpiCpuData;\r
+ IA32_DESCRIPTOR *Gdtr;\r
+ IA32_DESCRIPTOR *Idtr;\r
+ VOID *GdtForAp;\r
+ VOID *IdtForAp;\r
+ VOID *MachineCheckHandlerForAp;\r
+ CPU_STATUS_INFORMATION *CpuStatus;\r
+\r
+ if (!mAcpiS3Enable) {\r
+ return;\r
+ }\r
\r
//\r
- // Send INIT IPI - SIPI to all APs\r
+ // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0.\r
//\r
- SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);\r
+ mAcpiCpuData.NumberOfCpus = 0;\r
\r
- while (mNumberToFinish > 0) {\r
- CpuPause ();\r
+ //\r
+ // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
+ //\r
+ AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
+ if (AcpiCpuData == NULL) {\r
+ return;\r
}\r
+\r
+ //\r
+ // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
+ //\r
+ CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
+\r
+ mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
+ ASSERT (mAcpiCpuData.MtrrTable != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
+\r
+ mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
+ ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+\r
+ mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
+ ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+\r
+ mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+\r
+ mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (mAcpiCpuData.RegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+\r
+ //\r
+ // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
+ //\r
+ Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
+ Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
+\r
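+ //\r
+ // A single pool allocation holds the GDT, the IDT, and the machine check handler\r
+ // back to back; IdtForAp and MachineCheckHandlerForAp point into that allocation.\r
+ //\r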
+ GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
+ ASSERT (GdtForAp != NULL);\r
+ IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
+ MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));\r
+\r
+ CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
+ CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
+ CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
+\r
+ Gdtr->Base = (UINTN)GdtForAp;\r
+ Idtr->Base = (UINTN)IdtForAp;\r
+ mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;\r
+\r
+ CpuStatus = &mAcpiCpuData.CpuStatus;\r
+ CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));\r
+ if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {\r
+ CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
+ sizeof (UINT32) * CpuStatus->PackageCount,\r
+ (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage\r
+ );\r
+ ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);\r
+ }\r
+ if (AcpiCpuData->ApLocation != 0) {\r
+ mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
+ mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation\r
+ );\r
+ ASSERT (mAcpiCpuData.ApLocation != 0);\r
+ }\r
+ if (CpuStatus->PackageCount != 0) {\r
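+ //\r
+ // Allocate one semaphore counter for every possible thread in the system\r
+ // (PackageCount * MaxCoreCount * MaxThreadCount).\r
+ //\r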
+ mCpuFlags.SemaphoreCount = AllocateZeroPool (\r
+ sizeof (UINT32) * CpuStatus->PackageCount *\r
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount);\r
+ ASSERT (mCpuFlags.SemaphoreCount != NULL);\r
+ }\r
+ InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);\r
+ InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.ConsoleLogLock);\r
+}\r
+\r
+/**\r
+ Get ACPI S3 enable flag.\r
+\r
+**/\r
+VOID\r
+GetAcpiS3EnableFlag (\r
+ VOID\r
+ )\r
+{\r
+ mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);\r
}\r