/** @file\r
SMM MP service implementation\r
\r
-Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>\r
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
SPDX-License-Identifier: BSD-2-Clause-Patent\r
//\r
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
//\r
-MTRR_SETTINGS gSmiMtrrs;\r
-UINT64 gPhyMask;\r
-SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
-UINTN mSmmMpSyncDataSize;\r
-SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
-UINTN mSemaphoreSize;\r
-SPIN_LOCK *mPFLock = NULL;\r
-SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
-BOOLEAN mMachineCheckSupported = FALSE;\r
+MTRR_SETTINGS gSmiMtrrs;\r
+UINT64 gPhyMask;\r
+SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
+UINTN mSmmMpSyncDataSize;\r
+SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
+UINTN mSemaphoreSize;\r
+SPIN_LOCK *mPFLock = NULL;\r
+SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
+BOOLEAN mMachineCheckSupported = FALSE;\r
+MM_COMPLETION mSmmStartupThisApToken;\r
+\r
+//\r
+// Processor specified by mPackageFirstThreadIndex[PackageIndex] will do the package-scope register check.\r
+//\r
+UINT32 *mPackageFirstThreadIndex = NULL;\r
+\r
+extern UINTN mSmmShadowStackSize;\r
\r
/**\r
Performs an atomic compare exchange operation to get semaphore.\r
**/\r
UINT32\r
WaitForSemaphore (\r
- IN OUT volatile UINT32 *Sem\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
- UINT32 Value;\r
+ UINT32 Value;\r
\r
- do {\r
+ for ( ; ;) {\r
Value = *Sem;\r
- } while (Value == 0 ||\r
- InterlockedCompareExchange32 (\r
- (UINT32*)Sem,\r
- Value,\r
- Value - 1\r
- ) != Value);\r
+ if ((Value != 0) &&\r
+ (InterlockedCompareExchange32 (\r
+ (UINT32 *)Sem,\r
+ Value,\r
+ Value - 1\r
+ ) == Value))\r
+ {\r
+ break;\r
+ }\r
+\r
+ CpuPause ();\r
+ }\r
+\r
return Value - 1;\r
}\r
\r
-\r
/**\r
Performs an atomic compare exchange operation to release semaphore.\r
The compare exchange operation must be performed using\r
**/\r
UINT32\r
ReleaseSemaphore (\r
- IN OUT volatile UINT32 *Sem\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
- UINT32 Value;\r
+ UINT32 Value;\r
\r
do {\r
Value = *Sem;\r
} while (Value + 1 != 0 &&\r
InterlockedCompareExchange32 (\r
- (UINT32*)Sem,\r
+ (UINT32 *)Sem,\r
Value,\r
Value + 1\r
) != Value);\r
+\r
return Value + 1;\r
}\r
\r
**/\r
UINT32\r
LockdownSemaphore (\r
- IN OUT volatile UINT32 *Sem\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
- UINT32 Value;\r
+ UINT32 Value;\r
\r
do {\r
Value = *Sem;\r
} while (InterlockedCompareExchange32 (\r
- (UINT32*)Sem,\r
- Value, (UINT32)-1\r
+ (UINT32 *)Sem,\r
+ Value,\r
+ (UINT32)-1\r
) != Value);\r
+\r
return Value;\r
}\r
\r
**/\r
VOID\r
WaitForAllAPs (\r
- IN UINTN NumberOfAPs\r
+ IN UINTN NumberOfAPs\r
)\r
{\r
- UINTN BspIndex;\r
+ UINTN BspIndex;\r
\r
BspIndex = mSmmMpSyncData->BspIndex;\r
while (NumberOfAPs-- > 0) {\r
VOID\r
)\r
{\r
- UINTN Index;\r
+ UINTN Index;\r
\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (IsPresentAp (Index)) {\r
}\r
\r
/**\r
- Checks if all CPUs (with certain exceptions) have checked in for this SMI run\r
+ Check whether the processor at the given index performs the package-level\r
+ register programming during System Management Mode initialization.\r
\r
- @param Exceptions CPU Arrival exception flags.\r
+ The processor specified by mPackageFirstThreadIndex[PackageIndex]\r
+ will do the package-scope register programming.\r
\r
- @retval TRUE if all CPUs the have checked in.\r
- @retval FALSE if at least one Normal AP hasn't checked in.\r
+ @param[in] CpuIndex Processor Index.\r
+\r
+ @retval TRUE Perform the package level register programming.\r
+ @retval FALSE Don't perform the package level register programming.\r
\r
**/\r
BOOLEAN\r
-AllCpusInSmmWithExceptions (\r
- SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
+IsPackageFirstThread (\r
+ IN UINTN CpuIndex\r
)\r
{\r
- UINTN Index;\r
- SMM_CPU_DATA_BLOCK *CpuData;\r
- EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
+ UINT32 PackageIndex;\r
\r
- ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;\r
\r
- if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
- return TRUE;\r
+ ASSERT (mPackageFirstThreadIndex != NULL);\r
+\r
+ //\r
+ // Set the value of mPackageFirstThreadIndex[PackageIndex].\r
+ // The package-scope registers are checked by the first processor (CpuIndex) in the package.\r
+ //\r
+ // If mPackageFirstThreadIndex[PackageIndex] equals to (UINT32)-1, then update\r
+ // to current CpuIndex. If it doesn't equal to (UINT32)-1, don't change it.\r
+ //\r
+ if (mPackageFirstThreadIndex[PackageIndex] == (UINT32)-1) {\r
+ mPackageFirstThreadIndex[PackageIndex] = (UINT32)CpuIndex;\r
}\r
\r
- CpuData = mSmmMpSyncData->CpuData;\r
- ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
- for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
- if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
- continue;\r
+ return (BOOLEAN)(mPackageFirstThreadIndex[PackageIndex] == CpuIndex);\r
+}\r
+\r
+/**\r
+ Returns the number of SMM Delayed, Blocked and Disabled threads.\r
+\r
+ @param[in,out] DelayedCount The Number of SMM Delayed Thread Count.\r
+ @param[in,out] BlockedCount The Number of SMM Blocked Thread Count.\r
+ @param[in,out] DisabledCount The Number of SMM Disabled Thread Count.\r
+\r
+**/\r
+VOID\r
+GetSmmDelayedBlockedDisabledCount (\r
+ IN OUT UINT32 *DelayedCount,\r
+ IN OUT UINT32 *BlockedCount,\r
+ IN OUT UINT32 *DisabledCount\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ for (Index = 0; Index < mNumberOfCpus; Index++) {\r
+ if (IsPackageFirstThread (Index)) {\r
+ if (DelayedCount != NULL) {\r
+ *DelayedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);\r
}\r
- if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
- continue;\r
+\r
+ if (BlockedCount != NULL) {\r
+ *BlockedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);\r
}\r
- if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
- continue;\r
+\r
+ if (DisabledCount != NULL) {\r
+ *DisabledCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);\r
}\r
- return FALSE;\r
}\r
}\r
+}\r
\r
+/**\r
+ Checks if all CPUs (except Blocked & Disabled) have checked in for this SMI run\r
\r
- return TRUE;\r
+ @retval TRUE if all CPUs have checked in.\r
+ @retval FALSE if at least one Normal AP hasn't checked in.\r
+\r
+**/\r
+BOOLEAN\r
+AllCpusInSmmExceptBlockedDisabled (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 BlockedCount;\r
+ UINT32 DisabledCount;\r
+\r
+ BlockedCount = 0;\r
+ DisabledCount = 0;\r
+\r
+ //\r
+ // Check to make sure mSmmMpSyncData->Counter is valid and not locked.\r
+ //\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+\r
+ //\r
+ // Check whether all CPUs in SMM.\r
+ //\r
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
+ return TRUE;\r
+ }\r
+\r
+ //\r
+ // Check for the Blocked & Disabled Exceptions Case.\r
+ //\r
+ GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);\r
+\r
+ //\r
+ // *mSmmMpSyncData->Counter might be updated by all APs concurrently. The value\r
+ // can change dynamically. If some APs enter the SMI after the BlockedCount &\r
+ // DisabledCount check, then *mSmmMpSyncData->Counter will be increased, thus\r
+ // leading to *mSmmMpSyncData->Counter + BlockedCount + DisabledCount > mNumberOfCpus.\r
+ // Since BlockedCount & DisabledCount are local variables, it's OK here only for\r
+ // the check of all CPUs in SMM.\r
+ //\r
+ if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {\r
+ return TRUE;\r
+ }\r
+\r
+ return FALSE;\r
}\r
\r
/**\r
}\r
\r
McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);\r
- return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);\r
+ return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
+ MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
\r
McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);\r
- return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);\r
+ return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- UINT64 Timer;\r
- UINTN Index;\r
- BOOLEAN LmceEn;\r
- BOOLEAN LmceSignal;\r
+ UINT64 Timer;\r
+ UINTN Index;\r
+ BOOLEAN LmceEn;\r
+ BOOLEAN LmceSignal;\r
+ UINT32 DelayedCount;\r
+ UINT32 BlockedCount;\r
+\r
+ DelayedCount = 0;\r
+ BlockedCount = 0;\r
\r
ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
LmceSignal = FALSE;\r
if (mMachineCheckSupported) {\r
LmceEn = IsLmceOsEnabled ();\r
- LmceSignal = IsLmceSignaled();\r
+ LmceSignal = IsLmceSignaled ();\r
}\r
\r
//\r
// Sync with APs 1st timeout\r
//\r
for (Timer = StartSyncTimer ();\r
- !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&\r
- !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
- ) {\r
+ !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);\r
+ )\r
+ {\r
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();\r
+ if (mSmmMpSyncData->AllApArrivedWithException) {\r
+ break;\r
+ }\r
+\r
CpuPause ();\r
}\r
\r
// Send SMI IPIs to bring outside processors in\r
//\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
+ if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {\r
SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
}\r
}\r
// Sync with APs 2nd timeout.\r
//\r
for (Timer = StartSyncTimer ();\r
- !IsSyncTimerTimeout (Timer) &&\r
- !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
- ) {\r
+ !IsSyncTimerTimeout (Timer);\r
+ )\r
+ {\r
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();\r
+ if (mSmmMpSyncData->AllApArrivedWithException) {\r
+ break;\r
+ }\r
+\r
CpuPause ();\r
}\r
}\r
\r
+ if (!mSmmMpSyncData->AllApArrivedWithException) {\r
+ //\r
+ // Check for the Blocked & Delayed Case.\r
+ //\r
+ GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);\r
+ DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));\r
+ }\r
+\r
return;\r
}\r
\r
-\r
/**\r
Replace OS MTRR's with SMI MTRR's.\r
\r
**/\r
VOID\r
ReplaceOSMtrrs (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
SmmCpuFeaturesDisableSmrr ();\r
**/\r
BOOLEAN\r
WaitForAllAPsNotBusy (\r
- IN BOOLEAN BlockMode\r
+ IN BOOLEAN BlockMode\r
)\r
{\r
- UINTN Index;\r
+ UINTN Index;\r
\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
//\r
// Ignore BSP and APs which not call in SMM.\r
//\r
- if (!IsPresentAp(Index)) {\r
+ if (!IsPresentAp (Index)) {\r
continue;\r
}\r
\r
if (BlockMode) {\r
- AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
- ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
} else {\r
if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {\r
- ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
} else {\r
return FALSE;\r
}\r
**/\r
BOOLEAN\r
IsPresentAp (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&\r
- *(mSmmMpSyncData->CpuData[CpuIndex].Present));\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present));\r
}\r
\r
/**\r
**/\r
VOID\r
ReleaseToken (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
- PROCEDURE_TOKEN *Token;\r
+ PROCEDURE_TOKEN *Token;\r
\r
Token = mSmmMpSyncData->CpuData[CpuIndex].Token;\r
\r
VOID\r
)\r
{\r
- LIST_ENTRY *Link;\r
- PROCEDURE_TOKEN *ProcToken;\r
-\r
- Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
- while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {\r
- ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
-\r
- ProcToken->RunningApCount = 0;\r
- ProcToken->Used = FALSE;\r
-\r
- //\r
- // Check the spinlock status and release it if not released yet.\r
- //\r
- if (!AcquireSpinLockOrFail(ProcToken->SpinLock)) {\r
- DEBUG((DEBUG_ERROR, "Risk::SpinLock still not released!"));\r
- }\r
- ReleaseSpinLock (ProcToken->SpinLock);\r
-\r
- Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);\r
- }\r
+ //\r
+ // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.\r
+ //\r
+ gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
}\r
\r
/**\r
**/\r
VOID\r
BSPHandler (\r
- IN UINTN CpuIndex,\r
- IN SMM_CPU_SYNC_MODE SyncMode\r
+ IN UINTN CpuIndex,\r
+ IN SMM_CPU_SYNC_MODE SyncMode\r
)\r
{\r
- UINTN Index;\r
- MTRR_SETTINGS Mtrrs;\r
- UINTN ApCount;\r
- BOOLEAN ClearTopLevelSmiResult;\r
- UINTN PresentCount;\r
+ UINTN Index;\r
+ MTRR_SETTINGS Mtrrs;\r
+ UINTN ApCount;\r
+ BOOLEAN ClearTopLevelSmiResult;\r
+ UINTN PresentCount;\r
\r
ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
ApCount = 0;\r
// we cleared it after SMI handlers are run, we would miss the SMI that\r
// occurs after SMI handlers are done and before SMI status bit is cleared.\r
//\r
- ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
+ ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();\r
ASSERT (ClearTopLevelSmiResult == TRUE);\r
\r
//\r
//\r
// If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
//\r
- if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
-\r
+ if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Wait for APs to arrive\r
//\r
- SmmWaitForApArrival();\r
+ SmmWaitForApArrival ();\r
\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
*mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
\r
//\r
// Wait for all APs to get ready for programming MTRRs\r
//\r
WaitForAllAPs (ApCount);\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Signal all APs it's time for backup MTRRs\r
//\r
// We do the backup first and then set MTRR to avoid race condition for threads\r
// in the same core.\r
//\r
- MtrrGetAllMtrrs(&Mtrrs);\r
+ MtrrGetAllMtrrs (&Mtrrs);\r
\r
//\r
// Wait for all APs to complete their MTRR saving\r
// make those APs to exit SMI synchronously. APs which arrive later will be excluded and\r
// will run through freely.\r
//\r
- if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
-\r
+ if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
*mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
//\r
// Make sure all APs have their Present flag set\r
//\r
PresentCount = 0;\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
- PresentCount ++;\r
+ PresentCount++;\r
}\r
}\r
+\r
if (PresentCount > ApCount) {\r
break;\r
}\r
//\r
WaitForAllAPs (ApCount);\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Signal APs to restore MTRRs\r
//\r
// Restore OS MTRRs\r
//\r
SmmCpuFeaturesReenableSmrr ();\r
- MtrrSetAllMtrrs(&Mtrrs);\r
+ MtrrSetAllMtrrs (&Mtrrs);\r
\r
//\r
// Wait for all APs to complete MTRR programming\r
//\r
// Allow APs to check in from this point on\r
//\r
- *mSmmMpSyncData->Counter = 0;\r
- *mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ mSmmMpSyncData->AllApArrivedWithException = FALSE;\r
}\r
\r
/**\r
**/\r
VOID\r
APHandler (\r
- IN UINTN CpuIndex,\r
- IN BOOLEAN ValidSmi,\r
- IN SMM_CPU_SYNC_MODE SyncMode\r
+ IN UINTN CpuIndex,\r
+ IN BOOLEAN ValidSmi,\r
+ IN SMM_CPU_SYNC_MODE SyncMode\r
)\r
{\r
- UINT64 Timer;\r
- UINTN BspIndex;\r
- MTRR_SETTINGS Mtrrs;\r
- EFI_STATUS ProcedureStatus;\r
+ UINT64 Timer;\r
+ UINTN BspIndex;\r
+ MTRR_SETTINGS Mtrrs;\r
+ EFI_STATUS ProcedureStatus;\r
\r
//\r
// Timeout BSP\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
!(*mSmmMpSyncData->InsideSmm);\r
- ) {\r
+ )\r
+ {\r
CpuPause ();\r
}\r
\r
if (mSmmMpSyncData->BspIndex != -1) {\r
//\r
// BSP Index is known\r
+ // An AP is already in SMI but the BSP is not, so try to bring the BSP into SMM.\r
//\r
BspIndex = mSmmMpSyncData->BspIndex;\r
ASSERT (CpuIndex != BspIndex);\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
!(*mSmmMpSyncData->InsideSmm);\r
- ) {\r
+ )\r
+ {\r
CpuPause ();\r
}\r
\r
//\r
// Give up since BSP is unable to enter SMM\r
// and signal the completion of this AP\r
+ // Reduce the mSmmMpSyncData->Counter!\r
+ //\r
WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
} else {\r
//\r
// Don't know BSP index. Give up without sending IPI to BSP.\r
+ // Reduce the mSmmMpSyncData->Counter!\r
//\r
WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
//\r
*(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
\r
- if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Notify BSP of arrival at this point\r
//\r
ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Wait for the signal from BSP to backup MTRRs\r
//\r
//\r
// Backup OS MTRRs\r
//\r
- MtrrGetAllMtrrs(&Mtrrs);\r
+ MtrrGetAllMtrrs (&Mtrrs);\r
\r
//\r
// Signal BSP the completion of this AP\r
//\r
// Invoke the scheduled procedure\r
//\r
- ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
- (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
- );\r
+ ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(\r
+ (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
+ );\r
if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {\r
*mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;\r
}\r
ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
}\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Notify BSP the readiness of this AP to program MTRRs\r
//\r
// Restore OS MTRRs\r
//\r
SmmCpuFeaturesReenableSmrr ();\r
- MtrrSetAllMtrrs(&Mtrrs);\r
+ MtrrSetAllMtrrs (&Mtrrs);\r
}\r
\r
//\r
// Notify BSP the readiness of this AP to exit SMM\r
//\r
ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
-\r
}\r
\r
/**\r
**/\r
UINT32\r
Gen4GPageTable (\r
- IN BOOLEAN Is32BitPageTable\r
+ IN BOOLEAN Is32BitPageTable\r
)\r
{\r
VOID *PageTable;\r
UINTN PageIndex;\r
UINTN PageAddress;\r
\r
- Low2MBoundary = 0;\r
+ Low2MBoundary = 0;\r
High2MBoundary = 0;\r
- PagesNeeded = 0;\r
+ PagesNeeded = 0;\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
// Add one more page for known good stack, then find the lower 2MB aligned address.\r
// Add two more pages for known good stack and stack guard page,\r
// then find the lower 2MB aligned address.\r
//\r
- High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
- PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
+ High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
+ PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
}\r
+\r
//\r
// Allocate the page table\r
//\r
ASSERT (PageTable != NULL);\r
\r
PageTable = (VOID *)((UINTN)PageTable);\r
- Pte = (UINT64*)PageTable;\r
+ Pte = (UINT64 *)PageTable;\r
\r
//\r
// Zero out all page table entries first\r
//\r
for (Index = 0; Index < 4; Index++) {\r
Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |\r
- (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
+ (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
}\r
+\r
Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
\r
//\r
Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
- Pdpte = (UINT64*)PageTable;\r
+ Pdpte = (UINT64 *)PageTable;\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
+ Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
- Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
//\r
// Fill in Page Table Entries\r
//\r
- Pte = (UINT64*)Pages;\r
+ Pte = (UINT64 *)Pages;\r
PageAddress = PageIndex;\r
for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
if (PageAddress == GuardPage) {\r
// Mark the guard page as non-present\r
//\r
Pte[Index] = PageAddress | mAddressEncMask;\r
- GuardPage += mSmmStackSize;\r
+ GuardPage += (mSmmStackSize + mSmmShadowStackSize);\r
if (GuardPage > mSmmStackArrayEnd) {\r
GuardPage = 0;\r
}\r
} else {\r
Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
- PageAddress+= EFI_PAGE_SIZE;\r
+\r
+ PageAddress += EFI_PAGE_SIZE;\r
}\r
+\r
Pages += EFI_PAGE_SIZE;\r
}\r
}\r
\r
if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {\r
- Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
if ((Pte[0] & IA32_PG_PS) == 0) {\r
// 4K-page entries are already mapped. Just hide the first one anyway.\r
- Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0\r
} else {\r
// Create 4K-page entries\r
\r
Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
\r
- Pte = (UINT64*)Pages;\r
+ Pte = (UINT64 *)Pages;\r
PageAddress = 0;\r
- Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left\r
+ Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left\r
for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
PageAddress += EFI_PAGE_SIZE;\r
- Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
}\r
}\r
**/\r
BOOLEAN\r
IsTokenInUse (\r
- IN SPIN_LOCK *Token\r
+ IN SPIN_LOCK *Token\r
)\r
{\r
- LIST_ENTRY *Link;\r
- PROCEDURE_TOKEN *ProcToken;\r
+ LIST_ENTRY *Link;\r
+ PROCEDURE_TOKEN *ProcToken;\r
\r
if (Token == NULL) {\r
return FALSE;\r
}\r
\r
Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
- while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {\r
+ //\r
+ // Only search used tokens.\r
+ //\r
+ while (Link != gSmmCpuPrivate->FirstFreeToken) {\r
ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
\r
- if (ProcToken->Used && ProcToken->SpinLock == Token) {\r
+ if (ProcToken->SpinLock == Token) {\r
return TRUE;\r
}\r
\r
/**\r
Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.\r
\r
+ @return First token of the token buffer.\r
**/\r
-VOID\r
+LIST_ENTRY *\r
AllocateTokenBuffer (\r
VOID\r
)\r
{\r
- UINTN SpinLockSize;\r
- UINT32 TokenCountPerChunk;\r
- UINTN ProcTokenSize;\r
- UINTN Index;\r
- PROCEDURE_TOKEN *ProcToken;\r
- SPIN_LOCK *SpinLock;\r
- UINT8 *SpinLockBuffer;\r
- UINT8 *ProcTokenBuffer;\r
+ UINTN SpinLockSize;\r
+ UINT32 TokenCountPerChunk;\r
+ UINTN Index;\r
+ SPIN_LOCK *SpinLock;\r
+ UINT8 *SpinLockBuffer;\r
+ PROCEDURE_TOKEN *ProcTokens;\r
\r
SpinLockSize = GetSpinLockProperties ();\r
- ProcTokenSize = sizeof (PROCEDURE_TOKEN);\r
\r
TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);\r
ASSERT (TokenCountPerChunk != 0);\r
DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));\r
CpuDeadLoop ();\r
}\r
+\r
DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));\r
\r
//\r
SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);\r
ASSERT (SpinLockBuffer != NULL);\r
\r
- ProcTokenBuffer = AllocatePool (ProcTokenSize * TokenCountPerChunk);\r
- ASSERT (ProcTokenBuffer != NULL);\r
+ ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);\r
+ ASSERT (ProcTokens != NULL);\r
\r
for (Index = 0; Index < TokenCountPerChunk; Index++) {\r
SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);\r
InitializeSpinLock (SpinLock);\r
\r
- ProcToken = (PROCEDURE_TOKEN *)(ProcTokenBuffer + ProcTokenSize * Index);\r
- ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;\r
- ProcToken->SpinLock = SpinLock;\r
- ProcToken->Used = FALSE;\r
- ProcToken->RunningApCount = 0;\r
-\r
- InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);\r
- }\r
-}\r
-\r
-/**\r
- Find first free token in the allocated token list.\r
-\r
- @retval return the first free PROCEDURE_TOKEN.\r
-\r
-**/\r
-PROCEDURE_TOKEN *\r
-FindFirstFreeToken (\r
- VOID\r
- )\r
-{\r
- LIST_ENTRY *Link;\r
- PROCEDURE_TOKEN *ProcToken;\r
-\r
- Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
- while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {\r
- ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
-\r
- if (!ProcToken->Used) {\r
- return ProcToken;\r
- }\r
+ ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;\r
+ ProcTokens[Index].SpinLock = SpinLock;\r
+ ProcTokens[Index].RunningApCount = 0;\r
\r
- Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);\r
+ InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);\r
}\r
\r
- return NULL;\r
+ return &ProcTokens[0].Link;\r
}\r
\r
/**\r
**/\r
PROCEDURE_TOKEN *\r
GetFreeToken (\r
- IN UINT32 RunningApsCount\r
+ IN UINT32 RunningApsCount\r
)\r
{\r
PROCEDURE_TOKEN *NewToken;\r
\r
- NewToken = FindFirstFreeToken ();\r
- if (NewToken == NULL) {\r
- AllocateTokenBuffer ();\r
- NewToken = FindFirstFreeToken ();\r
+ //\r
+ // If FirstFreeToken meets the end of token list, enlarge the token list.\r
+ // Set FirstFreeToken to the first free token.\r
+ //\r
+ if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {\r
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
}\r
- ASSERT (NewToken != NULL);\r
\r
- NewToken->Used = TRUE;\r
+ NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);\r
+ gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);\r
+\r
NewToken->RunningApCount = RunningApsCount;\r
AcquireSpinLock (NewToken->SpinLock);\r
\r
**/\r
EFI_STATUS\r
IsApReady (\r
- IN SPIN_LOCK *Token\r
+ IN SPIN_LOCK *Token\r
)\r
{\r
if (AcquireSpinLockOrFail (Token)) {\r
**/\r
EFI_STATUS\r
InternalSmmStartupThisAp (\r
- IN EFI_AP_PROCEDURE2 Procedure,\r
- IN UINTN CpuIndex,\r
- IN OUT VOID *ProcArguments OPTIONAL,\r
- IN MM_COMPLETION *Token,\r
- IN UINTN TimeoutInMicroseconds,\r
- IN OUT EFI_STATUS *CpuStatus\r
+ IN EFI_AP_PROCEDURE2 Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL,\r
+ IN MM_COMPLETION *Token,\r
+ IN UINTN TimeoutInMicroseconds,\r
+ IN OUT EFI_STATUS *CpuStatus\r
)\r
{\r
- PROCEDURE_TOKEN *ProcToken;\r
+ PROCEDURE_TOKEN *ProcToken;\r
\r
if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
- DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
- DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
+ DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
- DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
+ DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
}\r
+\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
- DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
+ DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
}\r
+\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (Procedure == NULL) {\r
return EFI_INVALID_PARAMETER;\r
}\r
mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
if (Token != NULL) {\r
- ProcToken= GetFreeToken (1);\r
- mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;\r
- *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
+ if (Token != &mSmmStartupThisApToken) {\r
+ //\r
+ // When Token points to mSmmStartupThisApToken, this routine is called\r
+ // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).\r
+ //\r
+ // In this case, caller wants to startup AP procedure in non-blocking\r
+ // mode and cannot get the completion status from the Token because there\r
+ // is no way to return the Token to caller from SmmStartupThisAp().\r
+ // Caller needs to use its implementation specific way to query the completion status.\r
+ //\r
+ // There is no need to allocate a token for such case so the 3 overheads\r
+ // can be avoided:\r
+ // 1. Call AllocateTokenBuffer() when there is no free token.\r
+ // 2. Get a free token from the token buffer.\r
+ // 3. Call ReleaseToken() in APHandler().\r
+ //\r
+ ProcToken = GetFreeToken (1);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;\r
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
+ }\r
}\r
- mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;\r
+\r
+ mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;\r
if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {\r
*mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;\r
}\r
**/\r
EFI_STATUS\r
InternalSmmStartupAllAPs (\r
- IN EFI_AP_PROCEDURE2 Procedure,\r
- IN UINTN TimeoutInMicroseconds,\r
- IN OUT VOID *ProcedureArguments OPTIONAL,\r
- IN OUT MM_COMPLETION *Token,\r
- IN OUT EFI_STATUS *CPUStatus\r
+ IN EFI_AP_PROCEDURE2 Procedure,\r
+ IN UINTN TimeoutInMicroseconds,\r
+ IN OUT VOID *ProcedureArguments OPTIONAL,\r
+ IN OUT MM_COMPLETION *Token,\r
+ IN OUT EFI_STATUS *CPUStatus\r
)\r
{\r
- UINTN Index;\r
- UINTN CpuCount;\r
- PROCEDURE_TOKEN *ProcToken;\r
+ UINTN Index;\r
+ UINTN CpuCount;\r
+ PROCEDURE_TOKEN *ProcToken;\r
\r
if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (Procedure == NULL) {\r
return EFI_INVALID_PARAMETER;\r
}\r
CpuCount = 0;\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (IsPresentAp (Index)) {\r
- CpuCount ++;\r
+ CpuCount++;\r
\r
if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {\r
return EFI_INVALID_PARAMETER;\r
}\r
\r
- if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {\r
+ if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {\r
return EFI_NOT_READY;\r
}\r
+\r
ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
}\r
}\r
+\r
if (CpuCount == 0) {\r
return EFI_NOT_STARTED;\r
}\r
\r
if (Token != NULL) {\r
ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);\r
- *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
} else {\r
ProcToken = NULL;\r
}\r
\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (IsPresentAp (Index)) {\r
- mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;\r
+ mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;\r
mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;\r
if (ProcToken != NULL) {\r
- mSmmMpSyncData->CpuData[Index].Token = ProcToken;\r
+ mSmmMpSyncData->CpuData[Index].Token = ProcToken;\r
}\r
+\r
if (CPUStatus != NULL) {\r
- mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];\r
+ mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];\r
if (mSmmMpSyncData->CpuData[Index].Status != NULL) {\r
*mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;\r
}\r
EFI_STATUS\r
EFIAPI\r
ProcedureWrapper (\r
- IN VOID *Buffer\r
+ IN VOID *Buffer\r
)\r
{\r
- PROCEDURE_WRAPPER *Wrapper;\r
+ PROCEDURE_WRAPPER *Wrapper;\r
\r
Wrapper = Buffer;\r
Wrapper->Procedure (Wrapper->ProcedureArgument);\r
EFI_STATUS\r
EFIAPI\r
SmmBlockingStartupThisAp (\r
- IN EFI_AP_PROCEDURE Procedure,\r
- IN UINTN CpuIndex,\r
- IN OUT VOID *ProcArguments OPTIONAL\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
)\r
{\r
PROCEDURE_WRAPPER Wrapper;\r
\r
- Wrapper.Procedure = Procedure;\r
+ Wrapper.Procedure = Procedure;\r
Wrapper.ProcedureArgument = ProcArguments;\r
\r
//\r
EFI_STATUS\r
EFIAPI\r
SmmStartupThisAp (\r
- IN EFI_AP_PROCEDURE Procedure,\r
- IN UINTN CpuIndex,\r
- IN OUT VOID *ProcArguments OPTIONAL\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
)\r
{\r
- MM_COMPLETION Token;\r
-\r
- gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
+ gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;\r
\r
//\r
// Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
//\r
return InternalSmmStartupThisAp (\r
- ProcedureWrapper,\r
- CpuIndex,\r
- &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
- FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,\r
- 0,\r
- NULL\r
- );\r
+ ProcedureWrapper,\r
+ CpuIndex,\r
+ &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
+ FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,\r
+ 0,\r
+ NULL\r
+ );\r
}\r
\r
/**\r
IN UINTN CpuIndex\r
)\r
{\r
- SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
- ASSERT(CpuIndex < mMaxNumberOfCpus);\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
AsmWriteDr6 (CpuSaveState->x86._DR6);\r
IN UINTN CpuIndex\r
)\r
{\r
- SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
- ASSERT(CpuIndex < mMaxNumberOfCpus);\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
VOID\r
EFIAPI\r
SmiRendezvous (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
- EFI_STATUS Status;\r
- BOOLEAN ValidSmi;\r
- BOOLEAN IsBsp;\r
- BOOLEAN BspInProgress;\r
- UINTN Index;\r
- UINTN Cr2;\r
+ EFI_STATUS Status;\r
+ BOOLEAN ValidSmi;\r
+ BOOLEAN IsBsp;\r
+ BOOLEAN BspInProgress;\r
+ UINTN Index;\r
+ UINTN Cr2;\r
+\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
\r
- ASSERT(CpuIndex < mMaxNumberOfCpus);\r
+ if (mSmmRelocated) {\r
+ ASSERT (mSmmInitialized != NULL);\r
+ }\r
\r
//\r
// Save Cr2 because Page Fault exception in SMM may override its value,\r
Cr2 = 0;\r
SaveCr2 (&Cr2);\r
\r
+ if (mSmmRelocated && !mSmmInitialized[CpuIndex]) {\r
+ //\r
+ // Perform SmmInitHandler for CpuIndex\r
+ //\r
+ SmmInitHandler ();\r
+\r
+ //\r
+ // Restore Cr2\r
+ //\r
+ RestoreCr2 (Cr2);\r
+\r
+ //\r
+    // Mark that the first SMI init for CpuIndex has been done, so as to avoid reentry.\r
+ //\r
+ mSmmInitialized[CpuIndex] = TRUE;\r
+\r
+ return;\r
+ }\r
+\r
//\r
// Call the user register Startup function first.\r
//\r
//\r
// Determine if this is a valid SMI\r
//\r
- ValidSmi = PlatformValidSmi();\r
+ ValidSmi = PlatformValidSmi ();\r
\r
//\r
// Determine if BSP has been already in progress. Note this must be checked after\r
} else {\r
//\r
// Signal presence of this processor\r
+ // mSmmMpSyncData->Counter is increased here!\r
+ // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means BSP has already ended the synchronization.\r
//\r
if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
//\r
// BSP has already ended the synchronization, so QUIT!!!\r
+      // An AP arriving this late cannot enter the SMI since BSP has already ended the synchronization!!!\r
//\r
\r
//\r
while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
+\r
goto Exit;\r
} else {\r
-\r
//\r
// The BUSY lock is initialized to Released state.\r
// This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
// Platform hook fails to determine, use default BSP election method\r
//\r
InterlockedCompareExchange32 (\r
- (UINT32*)&mSmmMpSyncData->BspIndex,\r
+ (UINT32 *)&mSmmMpSyncData->BspIndex,\r
(UINT32)-1,\r
(UINT32)CpuIndex\r
);\r
// "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
//\r
if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
-\r
//\r
// Clear last request for SwitchBsp.\r
//\r
RestoreCr2 (Cr2);\r
}\r
\r
+/**\r
+ Initialize PackageBsp Info. Processor specified by mPackageFirstThreadIndex[PackageIndex]\r
+ will do the package-scope register programming. Set default CpuIndex to (UINT32)-1, which\r
+ means not specified yet.\r
+\r
+**/\r
+VOID\r
+InitPackageFirstThreadIndexInfo (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 Index;\r
+ UINT32 PackageId;\r
+ UINT32 PackageCount;\r
+\r
+ PackageId = 0;\r
+ PackageCount = 0;\r
+\r
+ //\r
+  // Count the number of packages: PackageCount is the maximum PackageId + 1\r
+ //\r
+ for (Index = 0; Index < mNumberOfCpus; Index++) {\r
+ if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {\r
+ PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;\r
+ }\r
+ }\r
+\r
+ PackageCount = PackageId + 1;\r
+\r
+ mPackageFirstThreadIndex = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);\r
+ ASSERT (mPackageFirstThreadIndex != NULL);\r
+ if (mPackageFirstThreadIndex == NULL) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Set default CpuIndex to (UINT32)-1, which means not specified yet.\r
+ //\r
+ SetMem32 (mPackageFirstThreadIndex, sizeof (UINT32) * PackageCount, (UINT32)-1);\r
+}\r
+\r
/**\r
Allocate buffer for SpinLock and Wrapper function buffer.\r
\r
\r
InitializeListHead (&gSmmCpuPrivate->TokenList);\r
\r
- AllocateTokenBuffer ();\r
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- UINTN ProcessorCount;\r
- UINTN TotalSize;\r
- UINTN GlobalSemaphoresSize;\r
- UINTN CpuSemaphoresSize;\r
- UINTN SemaphoreSize;\r
- UINTN Pages;\r
- UINTN *SemaphoreBlock;\r
- UINTN SemaphoreAddr;\r
-\r
- SemaphoreSize = GetSpinLockProperties ();\r
- ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ UINTN ProcessorCount;\r
+ UINTN TotalSize;\r
+ UINTN GlobalSemaphoresSize;\r
+ UINTN CpuSemaphoresSize;\r
+ UINTN SemaphoreSize;\r
+ UINTN Pages;\r
+ UINTN *SemaphoreBlock;\r
+ UINTN SemaphoreAddr;\r
+\r
+ SemaphoreSize = GetSpinLockProperties ();\r
+ ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
- TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
- DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
- DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
- Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
+ TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
+ DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
+ DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
+ Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
SemaphoreBlock = AllocatePages (Pages);\r
ASSERT (SemaphoreBlock != NULL);\r
ZeroMem (SemaphoreBlock, TotalSize);\r
\r
- SemaphoreAddr = (UINTN)SemaphoreBlock;\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock;\r
mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
- SemaphoreAddr += SemaphoreSize;\r
+ SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
- SemaphoreAddr += SemaphoreSize;\r
+ SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
- SemaphoreAddr += SemaphoreSize;\r
+ SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
- SemaphoreAddr += SemaphoreSize;\r
+ SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
- = (SPIN_LOCK *)SemaphoreAddr;\r
+ = (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += SemaphoreSize;\r
\r
- SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
- SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
- SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
\r
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
VOID\r
)\r
{\r
- UINTN CpuIndex;\r
+ UINTN CpuIndex;\r
\r
if (mSmmMpSyncData != NULL) {\r
//\r
// CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
//\r
ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
- mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
+ mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
//\r
//\r
mSmmMpSyncData->BspIndex = (UINT32)-1;\r
}\r
+\r
mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
\r
mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
- ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
- mSmmMpSyncData->AllCpusInSync != NULL);\r
+ ASSERT (\r
+ mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
+ mSmmMpSyncData->AllCpusInSync != NULL\r
+ );\r
*mSmmMpSyncData->Counter = 0;\r
*mSmmMpSyncData->InsideSmm = FALSE;\r
*mSmmMpSyncData->AllCpusInSync = FALSE;\r
\r
- for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
- mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
+ mSmmMpSyncData->AllApArrivedWithException = FALSE;\r
+\r
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {\r
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
(SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
- mSmmMpSyncData->CpuData[CpuIndex].Run =\r
+ mSmmMpSyncData->CpuData[CpuIndex].Run =\r
(UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
mSmmMpSyncData->CpuData[CpuIndex].Present =\r
(BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
**/\r
UINT32\r
InitializeMpServiceData (\r
- IN VOID *Stacks,\r
- IN UINTN StackSize,\r
- IN UINTN ShadowStackSize\r
+ IN VOID *Stacks,\r
+ IN UINTN StackSize,\r
+ IN UINTN ShadowStackSize\r
)\r
{\r
- UINT32 Cr3;\r
- UINTN Index;\r
- UINT8 *GdtTssTables;\r
- UINTN GdtTableStepSize;\r
- CPUID_VERSION_INFO_EDX RegEdx;\r
+ UINT32 Cr3;\r
+ UINTN Index;\r
+ UINT8 *GdtTssTables;\r
+ UINTN GdtTableStepSize;\r
+ CPUID_VERSION_INFO_EDX RegEdx;\r
+ UINT32 MaxExtendedFunction;\r
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;\r
\r
//\r
// Determine if this CPU supports machine check\r
//\r
mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
(sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
- mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
ASSERT (mSmmMpSyncData != NULL);\r
mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
InitializeMpSyncData ();\r
// Initialize physical address mask\r
// NOTE: Physical memory above virtual address limit is not supported !!!\r
//\r
- AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
- gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
- gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);\r
+ if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);\r
+ } else {\r
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;\r
+ }\r
+\r
+ gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;\r
+ //\r
+ // Clear the low 12 bits\r
+ //\r
+ gPhyMask &= 0xfffffffffffff000ULL;\r
\r
//\r
// Create page tables\r
InstallSmiHandler (\r
Index,\r
(UINT32)mCpuHotPlugData.SmBase[Index],\r
- (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
+ (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
StackSize,\r
(UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
gcSmiGdtr.Limit + 1,\r
**/\r
EFI_STATUS\r
RegisterStartupProcedure (\r
- IN EFI_AP_PROCEDURE Procedure,\r
- IN OUT VOID *ProcedureArguments OPTIONAL\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN OUT VOID *ProcedureArguments OPTIONAL\r
)\r
{\r
- if (Procedure == NULL && ProcedureArguments != NULL) {\r
+ if ((Procedure == NULL) && (ProcedureArguments != NULL)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
if (mSmmMpSyncData == NULL) {\r
return EFI_NOT_READY;\r
}\r