/** @file\r
SMM MP service implementation\r
\r
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
//\r
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
//\r
-UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];\r
-UINT64 gPhyMask;\r
-SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
-UINTN mSmmMpSyncDataSize;\r
+MTRR_SETTINGS gSmiMtrrs;\r
+UINT64 gPhyMask;\r
+SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
+UINTN mSmmMpSyncDataSize;\r
+SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
+UINTN mSemaphoreSize;\r
+SPIN_LOCK *mPFLock = NULL;\r
+SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
+BOOLEAN mMachineCheckSupported = FALSE;\r
+MM_COMPLETION mSmmStartupThisApToken;\r
+\r
+extern UINTN mSmmShadowStackSize;\r
\r
 /**
   Performs an atomic compare exchange operation to get semaphore.
 **/
 UINT32
 WaitForSemaphore (
-  IN OUT volatile UINT32 *Sem
+  IN OUT volatile UINT32  *Sem
   )
 {
-  UINT32 Value;
+  UINT32  Value;
 
-  do {
+  for ( ; ;) {
     Value = *Sem;
-  } while (Value == 0 ||
-           InterlockedCompareExchange32 (
-             (UINT32*)Sem,
-             Value,
-             Value - 1
-             ) != Value);
+    if ((Value != 0) &&
+        (InterlockedCompareExchange32 (
+           (UINT32 *)Sem,
+           Value,
+           Value - 1
+           ) == Value))
+    {
+      break;
+    }
+
+    //
+    // Be processor-friendly while spinning: the semaphore is either zero or
+    // was updated by another processor between the read and the exchange.
+    //
+    CpuPause ();
+  }
+
   return Value - 1;
 }
\r
-\r
 /**
   Performs an atomic compare exchange operation to release semaphore.
   The compare exchange operation must be performed using
 **/
 UINT32
 ReleaseSemaphore (
-  IN OUT volatile UINT32 *Sem
+  IN OUT volatile UINT32  *Sem
   )
 {
-  UINT32 Value;
+  UINT32  Value;
 
+  //
+  // NOTE(review): if the semaphore was locked down ((UINT32)-1), then
+  // Value + 1 == 0, the loop exits without performing the increment, and the
+  // function returns 0 -- the lockdown state is preserved.
+  //
   do {
     Value = *Sem;
   } while (Value + 1 != 0 &&
            InterlockedCompareExchange32 (
-             (UINT32*)Sem,
+             (UINT32 *)Sem,
              Value,
              Value + 1
              ) != Value);
+
   return Value + 1;
 }
\r
 **/
 UINT32
 LockdownSemaphore (
-  IN OUT volatile UINT32 *Sem
+  IN OUT volatile UINT32  *Sem
   )
 {
-  UINT32 Value;
+  UINT32  Value;
 
+  //
+  // Atomically swap the semaphore to (UINT32)-1 so no further processors can
+  // check in; the value observed at the moment of lockdown is returned.
+  //
   do {
     Value = *Sem;
   } while (InterlockedCompareExchange32 (
-             (UINT32*)Sem,
-             Value, (UINT32)-1
+             (UINT32 *)Sem,
+             Value,
+             (UINT32)-1
              ) != Value);
+
   return Value;
 }
\r
 **/
 VOID
 WaitForAllAPs (
-  IN UINTN NumberOfAPs
+  IN UINTN  NumberOfAPs
   )
 {
-  UINTN BspIndex;
+  UINTN  BspIndex;
 
   BspIndex = mSmmMpSyncData->BspIndex;
+  //
+  // Each AP posts one count on the BSP's Run semaphore (see APHandler);
+  // consume exactly one count per expected AP.
+  //
   while (NumberOfAPs-- > 0) {
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
   }
 }
\r
VOID\r
)\r
{\r
- UINTN Index;\r
- UINTN BspIndex;\r
+ UINTN Index;\r
\r
- BspIndex = mSmmMpSyncData->BspIndex;\r
- for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (IsPresentAp (Index)) {\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
}\r
}\r
}\r
SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
)\r
{\r
- UINTN Index;\r
- SMM_CPU_DATA_BLOCK *CpuData;\r
- EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
+ UINTN Index;\r
+ SMM_CPU_DATA_BLOCK *CpuData;\r
+ EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
- if (mSmmMpSyncData->Counter == mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
return TRUE;\r
}\r
\r
- CpuData = mSmmMpSyncData->CpuData;\r
+ CpuData = mSmmMpSyncData->CpuData;\r
ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
- for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
- if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {\r
+ if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {\r
continue;\r
}\r
- if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
+\r
+ if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {\r
continue;\r
}\r
- if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
+\r
+ if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {\r
continue;\r
}\r
+\r
return FALSE;\r
}\r
}\r
\r
-\r
return TRUE;\r
}\r
\r
+/**
+  Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
+
+  @retval TRUE   OS has enabled LMCE.
+  @retval FALSE  OS has not enabled LMCE.
+
+**/
+BOOLEAN
+IsLmceOsEnabled (
+  VOID
+  )
+{
+  MSR_IA32_MCG_CAP_REGISTER          McgCap;
+  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
+  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;
+
+  //
+  // LMCE must be supported by the processor (MCG_CAP.MCG_LMCE_P) ...
+  //
+  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
+  if (McgCap.Bits.MCG_LMCE_P == 0) {
+    return FALSE;
+  }
+
+  //
+  // ... permitted by firmware (IA32_FEATURE_CONTROL.LmceOn) ...
+  //
+  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
+  if (FeatureCtrl.Bits.LmceOn == 0) {
+    return FALSE;
+  }
+
+  //
+  // ... and opted into by the OS (IA32_MCG_EXT_CTL.LMCE_EN).
+  //
+  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
+  return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
+}
+\r
+/**
+  Return if Local machine check exception signaled.
+
+  Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
+  delivered to only the logical processor.
+
+  @retval TRUE   LMCE was signaled.
+  @retval FALSE  LMCE was not signaled.
+
+**/
+BOOLEAN
+IsLmceSignaled (
+  VOID
+  )
+{
+  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;
+
+  //
+  // IA32_MCG_STATUS.LMCE_S is set by hardware when the machine-check event
+  // was delivered only to this logical processor.
+  //
+  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
+  return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
+}
\r
/**\r
Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before\r
VOID\r
)\r
{\r
- UINT64 Timer;\r
- UINTN Index;\r
-\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ UINT64 Timer;\r
+ UINTN Index;\r
+ BOOLEAN LmceEn;\r
+ BOOLEAN LmceSignal;\r
+\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+\r
+ LmceEn = FALSE;\r
+ LmceSignal = FALSE;\r
+ if (mMachineCheckSupported) {\r
+ LmceEn = IsLmceOsEnabled ();\r
+ LmceSignal = IsLmceSignaled ();\r
+ }\r
\r
//\r
// Platform implementor should choose a timeout value appropriately:\r
// Sync with APs 1st timeout\r
//\r
for (Timer = StartSyncTimer ();\r
- !IsSyncTimerTimeout (Timer) &&\r
- !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
- ) {\r
+ !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);\r
+ )\r
+ {\r
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);\r
+ if (mSmmMpSyncData->AllApArrivedWithException) {\r
+ break;\r
+ }\r
+\r
CpuPause ();\r
}\r
\r
// - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
// In both cases, adding SMI-disabling checking code increases overhead.\r
//\r
- if (mSmmMpSyncData->Counter < mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
//\r
// Send SMI IPIs to bring outside processors in\r
//\r
- for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {\r
SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
}\r
}\r
// Sync with APs 2nd timeout.\r
//\r
for (Timer = StartSyncTimer ();\r
- !IsSyncTimerTimeout (Timer) &&\r
- !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
- ) {\r
+ !IsSyncTimerTimeout (Timer);\r
+ )\r
+ {\r
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);\r
+ if (mSmmMpSyncData->AllApArrivedWithException) {\r
+ break;\r
+ }\r
+\r
CpuPause ();\r
}\r
}\r
return;\r
}\r
\r
-\r
 /**
   Replace OS MTRR's with SMI MTRR's.
 
 **/
 VOID
 ReplaceOSMtrrs (
-  IN UINTN CpuIndex
+  IN UINTN  CpuIndex
   )
 {
-  PROCESSOR_SMM_DESCRIPTOR *Psd;
-  UINT64 *SmiMtrrs;
-  MTRR_SETTINGS *BiosMtrr;
-
-  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
-  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
-
   SmmCpuFeaturesDisableSmrr ();
 
   //
   // Replace all MTRRs registers
   //
+  // NOTE(review): gSmiMtrrs is the module-global MTRR_SETTINGS (declared at
+  // top of file), replacing the per-CPU PSD MtrrBaseMaskPtr lookup; presumably
+  // it is populated during SMM init -- confirm against the initialization path.
+  //
-  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
-  MtrrSetAllMtrrs(BiosMtrr);
+  MtrrSetAllMtrrs (&gSmiMtrrs);
 }
+\r
+/**
+  Check whether task has been finished by all APs.
+
+  @param BlockMode Whether did it in block mode or non-block mode.
+
+  @retval TRUE  Task has been finished by all APs.
+  @retval FALSE Task has not been finished by all APs.
+
+**/
+BOOLEAN
+WaitForAllAPsNotBusy (
+  IN BOOLEAN  BlockMode
+  )
+{
+  UINTN  Index;
+
+  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+    //
+    // Ignore BSP and APs which not call in SMM.
+    //
+    if (!IsPresentAp (Index)) {
+      continue;
+    }
+
+    if (BlockMode) {
+      //
+      // Block mode: wait until this AP's Busy lock can be taken (AP idle),
+      // then release it immediately -- we only probe, we do not hold it.
+      //
+      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+    } else {
+      //
+      // Non-block mode: report FALSE as soon as any AP is still busy.
+      //
+      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
+        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+      } else {
+        return FALSE;
+      }
+    }
+  }
+
+  return TRUE;
+}
+\r
+/**
+  Check whether it is a present AP.
+
+  @param CpuIndex The AP index which calls this function.
+
+  @retval TRUE   It's a present AP.
+  @retval FALSE  This is not an AP or it is not present.
+
+**/
+BOOLEAN
+IsPresentAp (
+  IN UINTN  CpuIndex
+  )
+{
+  //
+  // An AP is "present" when it is not the currently executing CPU and its
+  // Present flag shows it has checked into this SMI.
+  //
+  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
+          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
+}
+\r
+/**
+  Clean up the status flags used during executing the procedure.
+
+  @param CpuIndex The AP index which calls this function.
+
+**/
+VOID
+ReleaseToken (
+  IN UINTN  CpuIndex
+  )
+{
+  PROCEDURE_TOKEN  *Token;
+
+  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;
+
+  //
+  // The last AP to finish releases the token's spin lock, which was acquired
+  // in GetFreeToken () when the token was handed out.
+  //
+  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
+    ReleaseSpinLock (Token->SpinLock);
+  }
+
+  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
+}
+\r
+/**
+  Mark all tokens in the maintained list as free.
+
+  Note: the token buffers are kept allocated for reuse across SMIs; only the
+  FirstFreeToken cursor is rewound, nothing is actually freed.
+
+**/
+VOID
+ResetTokens (
+  VOID
+  )
+{
+  //
+  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
+  //
+  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
+}
\r
/**\r
**/\r
VOID\r
BSPHandler (\r
- IN UINTN CpuIndex,\r
- IN SMM_CPU_SYNC_MODE SyncMode\r
+ IN UINTN CpuIndex,\r
+ IN SMM_CPU_SYNC_MODE SyncMode\r
)\r
{\r
- UINTN Index;\r
- MTRR_SETTINGS Mtrrs;\r
- UINTN ApCount;\r
- BOOLEAN ClearTopLevelSmiResult;\r
- UINTN PresentCount;\r
+ UINTN Index;\r
+ MTRR_SETTINGS Mtrrs;\r
+ UINTN ApCount;\r
+ BOOLEAN ClearTopLevelSmiResult;\r
+ UINTN PresentCount;\r
\r
ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
ApCount = 0;\r
//\r
// Flag BSP's presence\r
//\r
- mSmmMpSyncData->InsideSmm = TRUE;\r
+ *mSmmMpSyncData->InsideSmm = TRUE;\r
\r
//\r
// Initialize Debug Agent to start source level debug in BSP handler\r
//\r
// Mark this processor's presence\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
\r
//\r
// Clear platform top level SMI status bit before calling SMI handlers. If\r
// we cleared it after SMI handlers are run, we would miss the SMI that\r
// occurs after SMI handlers are done and before SMI status bit is cleared.\r
//\r
- ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
+ ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();\r
ASSERT (ClearTopLevelSmiResult == TRUE);\r
\r
//\r
//\r
// If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
//\r
- if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
-\r
+ if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Wait for APs to arrive\r
//\r
- SmmWaitForApArrival();\r
+ SmmWaitForApArrival ();\r
\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
\r
//\r
// Wait for all APs to get ready for programming MTRRs\r
//\r
WaitForAllAPs (ApCount);\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Signal all APs it's time for backup MTRRs\r
//\r
// We do the backup first and then set MTRR to avoid race condition for threads\r
// in the same core.\r
//\r
- MtrrGetAllMtrrs(&Mtrrs);\r
+ MtrrGetAllMtrrs (&Mtrrs);\r
\r
//\r
// Wait for all APs to complete their MTRR saving\r
//\r
// The BUSY lock is initialized to Acquired state\r
//\r
- AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
\r
//\r
- // Restore SMM Configuration in S3 boot path.\r
+ // Perform the pre tasks\r
//\r
- if (mRestoreSmmConfigurationInS3) {\r
- //\r
- // Configure SMM Code Access Check feature if available.\r
- //\r
- ConfigSmmCodeAccessCheck ();\r
- mRestoreSmmConfigurationInS3 = FALSE;\r
- }\r
+ PerformPreTasks ();\r
\r
//\r
// Invoke SMM Foundation EntryPoint with the processor information context.\r
//\r
// Make sure all APs have completed their pending none-block tasks\r
//\r
- for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
- AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);\r
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);;\r
- }\r
- }\r
+ WaitForAllAPsNotBusy (TRUE);\r
\r
//\r
// Perform the remaining tasks\r
// make those APs to exit SMI synchronously. APs which arrive later will be excluded and\r
// will run through freely.\r
//\r
- if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
-\r
+ if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
//\r
// Make sure all APs have their Present flag set\r
//\r
while (TRUE) {\r
PresentCount = 0;\r
- for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (mSmmMpSyncData->CpuData[Index].Present) {\r
- PresentCount ++;\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
+ PresentCount++;\r
}\r
}\r
+\r
if (PresentCount > ApCount) {\r
break;\r
}\r
//\r
// Notify all APs to exit\r
//\r
- mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
ReleaseAllAPs ();\r
\r
//\r
//\r
WaitForAllAPs (ApCount);\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Signal APs to restore MTRRs\r
//\r
// Restore OS MTRRs\r
//\r
SmmCpuFeaturesReenableSmrr ();\r
- MtrrSetAllMtrrs(&Mtrrs);\r
+ MtrrSetAllMtrrs (&Mtrrs);\r
\r
//\r
// Wait for all APs to complete MTRR programming\r
//\r
// Clear the Present flag of BSP\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
\r
//\r
// Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
//\r
WaitForAllAPs (ApCount);\r
\r
+ //\r
+ // Reset the tokens buffer.\r
+ //\r
+ ResetTokens ();\r
+\r
//\r
// Reset BspIndex to -1, meaning BSP has not been elected.\r
//\r
//\r
// Allow APs to check in from this point on\r
//\r
- mSmmMpSyncData->Counter = 0;\r
- mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
}\r
\r
/**\r
**/\r
VOID\r
APHandler (\r
- IN UINTN CpuIndex,\r
- IN BOOLEAN ValidSmi,\r
- IN SMM_CPU_SYNC_MODE SyncMode\r
+ IN UINTN CpuIndex,\r
+ IN BOOLEAN ValidSmi,\r
+ IN SMM_CPU_SYNC_MODE SyncMode\r
)\r
{\r
- UINT64 Timer;\r
- UINTN BspIndex;\r
- MTRR_SETTINGS Mtrrs;\r
+ UINT64 Timer;\r
+ UINTN BspIndex;\r
+ MTRR_SETTINGS Mtrrs;\r
+ EFI_STATUS ProcedureStatus;\r
\r
//\r
// Timeout BSP\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
- ) {\r
+ !(*mSmmMpSyncData->InsideSmm);\r
+ )\r
+ {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// BSP timeout in the first round\r
//\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
- ) {\r
+ !(*mSmmMpSyncData->InsideSmm);\r
+ )\r
+ {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// Give up since BSP is unable to enter SMM\r
// and signal the completion of this AP\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
} else {\r
//\r
// Don't know BSP index. Give up without sending IPI to BSP.\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
}\r
//\r
// Mark this processor's presence\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
\r
- if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Notify BSP of arrival at this point\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Wait for the signal from BSP to backup MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Backup OS MTRRs\r
//\r
- MtrrGetAllMtrrs(&Mtrrs);\r
+ MtrrGetAllMtrrs (&Mtrrs);\r
\r
//\r
// Signal BSP the completion of this AP\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for BSP's signal to program MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Replace OS MTRRs with SMI MTRRs\r
//\r
// Signal BSP the completion of this AP\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
while (TRUE) {\r
//\r
// Wait for something to happen\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Check if BSP wants to exit SMM\r
//\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
break;\r
}\r
\r
// BUSY should be acquired by SmmStartupThisAp()\r
//\r
ASSERT (\r
- !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
+ !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
);\r
\r
//\r
// Invoke the scheduled procedure\r
//\r
- (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
- (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
- );\r
+ ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(\r
+ (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
+ );\r
+ if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {\r
+ *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;\r
+ }\r
+\r
+ if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {\r
+ ReleaseToken (CpuIndex);\r
+ }\r
\r
//\r
// Release BUSY\r
//\r
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
}\r
\r
- if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
+ if (SmmCpuFeaturesNeedConfigureMtrrs ()) {\r
//\r
// Notify BSP the readiness of this AP to program MTRRs\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for the signal from BSP to program MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Restore OS MTRRs\r
//\r
SmmCpuFeaturesReenableSmrr ();\r
- MtrrSetAllMtrrs(&Mtrrs);\r
+ MtrrSetAllMtrrs (&Mtrrs);\r
}\r
\r
//\r
// Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for the signal from BSP to Reset states/semaphore for this processor\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Reset states/semaphore for this processor\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
\r
//\r
// Notify BSP the readiness of this AP to exit SMM\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
-\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
/**\r
Create 4G PageTable in SMRAM.\r
\r
- @param ExtraPages Additional page numbers besides for 4G memory\r
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE\r
@return PageTable Address\r
\r
**/\r
UINT32\r
Gen4GPageTable (\r
- IN UINTN ExtraPages\r
+ IN BOOLEAN Is32BitPageTable\r
)\r
{\r
VOID *PageTable;\r
UINTN PageIndex;\r
UINTN PageAddress;\r
\r
- Low2MBoundary = 0;\r
+ Low2MBoundary = 0;\r
High2MBoundary = 0;\r
- PagesNeeded = 0;\r
+ PagesNeeded = 0;\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
// Add one more page for known good stack, then find the lower 2MB aligned address.\r
// Add two more pages for known good stack and stack guard page,\r
// then find the lower 2MB aligned address.\r
//\r
- High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
- PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
+ High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
+ PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
}\r
+\r
//\r
// Allocate the page table\r
//\r
- PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);\r
+ PageTable = AllocatePageTableMemory (5 + PagesNeeded);\r
ASSERT (PageTable != NULL);\r
\r
- PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));\r
- Pte = (UINT64*)PageTable;\r
+ PageTable = (VOID *)((UINTN)PageTable);\r
+ Pte = (UINT64 *)PageTable;\r
\r
//\r
// Zero out all page table entries first\r
// Set Page Directory Pointers\r
//\r
for (Index = 0; Index < 4; Index++) {\r
- Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;\r
+ Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |\r
+ (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
}\r
+\r
Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
\r
//\r
// Fill in Page Directory Entries\r
//\r
for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
- Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
+ Pdpte = (UINT64 *)PageTable;\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
+ Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
- Pdpte = (UINT64*)PageTable;\r
for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
- Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
- Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;\r
+ Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
//\r
// Fill in Page Table Entries\r
//\r
- Pte = (UINT64*)Pages;\r
+ Pte = (UINT64 *)Pages;\r
PageAddress = PageIndex;\r
for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
if (PageAddress == GuardPage) {\r
//\r
// Mark the guard page as non-present\r
//\r
- Pte[Index] = PageAddress;\r
- GuardPage += mSmmStackSize;\r
+ Pte[Index] = PageAddress | mAddressEncMask;\r
+ GuardPage += (mSmmStackSize + mSmmShadowStackSize);\r
if (GuardPage > mSmmStackArrayEnd) {\r
GuardPage = 0;\r
}\r
} else {\r
- Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
- PageAddress+= EFI_PAGE_SIZE;\r
+\r
+ PageAddress += EFI_PAGE_SIZE;\r
}\r
+\r
Pages += EFI_PAGE_SIZE;\r
}\r
}\r
\r
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {\r
+ Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ if ((Pte[0] & IA32_PG_PS) == 0) {\r
+ // 4K-page entries are already mapped. Just hide the first one anyway.\r
+ Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0\r
+ } else {\r
+ // Create 4K-page entries\r
+ Pages = (UINTN)AllocatePageTableMemory (1);\r
+ ASSERT (Pages != 0);\r
+\r
+ Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
+\r
+ Pte = (UINT64 *)Pages;\r
+ PageAddress = 0;\r
+ Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left\r
+ for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
+ PageAddress += EFI_PAGE_SIZE;\r
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ }\r
+ }\r
+ }\r
+\r
return (UINT32)(UINTN)PageTable;\r
}\r
\r
 /**
-  Set memory cache ability.
+  Checks whether the input token is the current used token.
 
-  @param PageTable     PageTable Address
-  @param Address       Memory Address to change cache ability
-  @param Cacheability  Cache ability to set
+  @param[in] Token  This parameter describes the token that was passed into DispatchProcedure or
+                    BroadcastProcedure.
 
+  @retval TRUE   The input token is the current used token.
+  @retval FALSE  The input token is not the current used token.
 **/
-VOID
-SetCacheability (
-  IN UINT64  *PageTable,
-  IN UINTN   Address,
-  IN UINT8   Cacheability
+BOOLEAN
+IsTokenInUse (
+  IN SPIN_LOCK  *Token
   )
 {
-  UINTN  PTIndex;
-  VOID   *NewPageTableAddress;
-  UINT64 *NewPageTable;
-  UINTN  Index;
+  LIST_ENTRY       *Link;
+  PROCEDURE_TOKEN  *ProcToken;
+
+  if (Token == NULL) {
+    return FALSE;
+  }
+
+  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
+  //
+  // Only search used tokens: entries from the list head up to (but not
+  // including) FirstFreeToken are the ones currently in use.
+  //
+  while (Link != gSmmCpuPrivate->FirstFreeToken) {
+    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
 
-  ASSERT ((Address & EFI_PAGE_MASK) == 0);
+    if (ProcToken->SpinLock == Token) {
+      return TRUE;
+    }
+
+    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
+  }
 
-  if (sizeof (UINTN) == sizeof (UINT64)) {
-    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
-    ASSERT (PageTable[PTIndex] & IA32_PG_P);
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+  return FALSE;
+}
+\r
+/**
+  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
+
+  @return First token of the token buffer.
+**/
+LIST_ENTRY *
+AllocateTokenBuffer (
+  VOID
+  )
+{
+  UINTN            SpinLockSize;
+  UINT32           TokenCountPerChunk;
+  UINTN            Index;
+  SPIN_LOCK        *SpinLock;
+  UINT8            *SpinLockBuffer;
+  PROCEDURE_TOKEN  *ProcTokens;
+
+  SpinLockSize = GetSpinLockProperties ();
+
+  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
+  ASSERT (TokenCountPerChunk != 0);
+  if (TokenCountPerChunk == 0) {
+    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
+    CpuDeadLoop ();
   }
 
-  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
-  ASSERT (PageTable[PTIndex] & IA32_PG_P);
-  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
 
   //
-  // A perfect implementation should check the original cacheability with the
-  // one being set, and break a 2M page entry into pieces only when they
-  // disagreed.
+  // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
   //
-  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
-  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
-    //
-    // Allocate a page from SMRAM
-    //
-    NewPageTableAddress = AllocatePages (1);
-    ASSERT (NewPageTableAddress != NULL);
+  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
+  ASSERT (SpinLockBuffer != NULL);
+
+  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
+  ASSERT (ProcTokens != NULL);
+
+  //
+  // Carve one SPIN_LOCK per token out of the lock buffer, initialize each
+  // token, and append the whole chunk to the global token list.
+  //
+  for (Index = 0; Index < TokenCountPerChunk; Index++) {
+    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
+    InitializeSpinLock (SpinLock);
+
+    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
+    ProcTokens[Index].SpinLock       = SpinLock;
+    ProcTokens[Index].RunningApCount = 0;
+
+    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
+  }
+
+  return &ProcTokens[0].Link;
+}
+\r
+/**\r
+ Get the free token.\r
+\r
+ If no free token, allocate new tokens then return the free one.\r
+\r
+ @param RunningApsCount The Running Aps count for this token.\r
+\r
+  @return Pointer to the first free PROCEDURE_TOKEN.\r
\r
- NewPageTable = (UINT64 *)NewPageTableAddress;\r
+**/\r
+PROCEDURE_TOKEN *\r
+GetFreeToken (\r
+ IN UINT32 RunningApsCount\r
+ )\r
+{\r
+ PROCEDURE_TOKEN *NewToken;\r
+\r
+ //\r
+ // If FirstFreeToken meets the end of token list, enlarge the token list.\r
+ // Set FirstFreeToken to the first free token.\r
+ //\r
+ if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {\r
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
+ }\r
+\r
+ NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);\r
+ gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);\r
+\r
+ NewToken->RunningApCount = RunningApsCount;\r
+ AcquireSpinLock (NewToken->SpinLock);\r
+\r
+ return NewToken;\r
+}\r
+\r
+/**\r
+ Checks status of specified AP.\r
+\r
+ This function checks whether the specified AP has finished the task assigned\r
+ by StartupThisAP(), and whether timeout expires.\r
+\r
+ @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
+ BroadcastProcedure.\r
+\r
+ @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().\r
+ @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.\r
+**/\r
+EFI_STATUS\r
+IsApReady (\r
+ IN SPIN_LOCK *Token\r
+ )\r
+{\r
+ if (AcquireSpinLockOrFail (Token)) {\r
+ ReleaseSpinLock (Token);\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ return EFI_NOT_READY;\r
+}\r
+\r
+/**\r
+ Schedule a procedure to run on the specified CPU.\r
+\r
+ @param[in] Procedure The address of the procedure to run\r
+ @param[in] CpuIndex Target CPU Index\r
+ @param[in,out] ProcArguments The parameter to pass to the procedure\r
+ @param[in] Token This is an optional parameter that allows the caller to execute the\r
+ procedure in a blocking or non-blocking fashion. If it is NULL the\r
+ call is blocking, and the call will not return until the AP has\r
+ completed the procedure. If the token is not NULL, the call will\r
+ return immediately. The caller can check whether the procedure has\r
+ completed with CheckOnProcedure or WaitForProcedure.\r
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish\r
+ execution of Procedure, either for blocking or non-blocking mode.\r
+ Zero means infinity. If the timeout expires before all APs return\r
+ from Procedure, then Procedure on the failed APs is terminated. If\r
+ the timeout expires in blocking mode, the call returns EFI_TIMEOUT.\r
+                                          If the timeout expires in non-blocking mode, whether it has\r
+                                          expired can be determined through CheckOnProcedure or WaitForProcedure.\r
+ Note that timeout support is optional. Whether an implementation\r
+ supports this feature can be determined via the Attributes data\r
+ member.\r
+ @param[in,out] CpuStatus This optional pointer may be used to get the status code returned\r
+ by Procedure when it completes execution on the target AP, or with\r
+ EFI_TIMEOUT if the Procedure fails to complete within the optional\r
+ timeout. The implementation will update this variable with\r
+ EFI_NOT_READY prior to starting Procedure on the target AP.\r
+\r
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
+ @retval EFI_SUCCESS The procedure has been successfully scheduled\r
+\r
+**/\r
+EFI_STATUS\r
+InternalSmmStartupThisAp (\r
+ IN EFI_AP_PROCEDURE2 Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL,\r
+ IN MM_COMPLETION *Token,\r
+ IN UINTN TimeoutInMicroseconds,\r
+ IN OUT EFI_STATUS *CpuStatus\r
+ )\r
+{\r
+ PROCEDURE_TOKEN *ProcToken;\r
+\r
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
+ DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
+ DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
\r
- for (Index = 0; Index < 0x200; Index++) {\r
- NewPageTable[Index] = PageTable[PTIndex];\r
- if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {\r
- NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);\r
- NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;\r
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
+ if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
+ DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
+ }\r
+\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
+ DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
+ }\r
+\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (Procedure == NULL) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+\r
+ mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
+ mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
+ if (Token != NULL) {\r
+ if (Token != &mSmmStartupThisApToken) {\r
+ //\r
+ // When Token points to mSmmStartupThisApToken, this routine is called\r
+ // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).\r
+ //\r
+ // In this case, caller wants to startup AP procedure in non-blocking\r
+ // mode and cannot get the completion status from the Token because there\r
+ // is no way to return the Token to caller from SmmStartupThisAp().\r
+ // Caller needs to use its implementation specific way to query the completion status.\r
+ //\r
+ // There is no need to allocate a token for such case so the 3 overheads\r
+ // can be avoided:\r
+ // 1. Call AllocateTokenBuffer() when there is no free token.\r
+ // 2. Get a free token from the token buffer.\r
+ // 3. Call ReleaseToken() in APHandler().\r
+ //\r
+ ProcToken = GetFreeToken (1);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;\r
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
+ }\r
+ }\r
+\r
+ mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;\r
+ if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {\r
+ *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;\r
+ }\r
+\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+\r
+ if (Token == NULL) {\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ }\r
+\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ Worker function to execute a caller provided function on all enabled APs.\r
+\r
+ @param[in] Procedure A pointer to the function to be run on\r
+ enabled APs of the system.\r
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for\r
+ APs to return from Procedure, either for\r
+ blocking or non-blocking mode.\r
+ @param[in,out] ProcedureArguments The parameter passed into Procedure for\r
+ all APs.\r
+ @param[in,out] Token This is an optional parameter that allows the caller to execute the\r
+ procedure in a blocking or non-blocking fashion. If it is NULL the\r
+ call is blocking, and the call will not return until the AP has\r
+ completed the procedure. If the token is not NULL, the call will\r
+ return immediately. The caller can check whether the procedure has\r
+ completed with CheckOnProcedure or WaitForProcedure.\r
+ @param[in,out] CPUStatus This optional pointer may be used to get the status code returned\r
+ by Procedure when it completes execution on the target AP, or with\r
+ EFI_TIMEOUT if the Procedure fails to complete within the optional\r
+ timeout. The implementation will update this variable with\r
+ EFI_NOT_READY prior to starting Procedure on the target AP.\r
+\r
+\r
+ @retval EFI_SUCCESS In blocking mode, all APs have finished before\r
+ the timeout expired.\r
+ @retval EFI_SUCCESS In non-blocking mode, function has been dispatched\r
+ to all enabled APs.\r
+ @retval others Failed to Startup all APs.\r
+\r
+**/\r
+EFI_STATUS\r
+InternalSmmStartupAllAPs (\r
+ IN EFI_AP_PROCEDURE2 Procedure,\r
+ IN UINTN TimeoutInMicroseconds,\r
+ IN OUT VOID *ProcedureArguments OPTIONAL,\r
+ IN OUT MM_COMPLETION *Token,\r
+ IN OUT EFI_STATUS *CPUStatus\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN CpuCount;\r
+ PROCEDURE_TOKEN *ProcToken;\r
+\r
+ if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (Procedure == NULL) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ CpuCount = 0;\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (IsPresentAp (Index)) {\r
+ CpuCount++;\r
+\r
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {\r
+ return EFI_NOT_READY;\r
+ }\r
+\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
+ }\r
+ }\r
+\r
+ if (CpuCount == 0) {\r
+ return EFI_NOT_STARTED;\r
+ }\r
+\r
+ if (Token != NULL) {\r
+ ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);\r
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
+ } else {\r
+ ProcToken = NULL;\r
+ }\r
+\r
+ //\r
+  // Make sure the BUSY lock of every present AP is acquired.\r
+  //\r
+  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each AP,\r
+  // AcquireSpinLock is used here instead of AcquireSpinLockOrFail; the locks are expected\r
+  // to be free, so the blocking acquisition will not stall.\r
+ //\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (IsPresentAp (Index)) {\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
+ }\r
+ }\r
+\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ if (IsPresentAp (Index)) {\r
+ mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;\r
+ mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;\r
+ if (ProcToken != NULL) {\r
+ mSmmMpSyncData->CpuData[Index].Token = ProcToken;\r
+ }\r
+\r
+ if (CPUStatus != NULL) {\r
+ mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];\r
+ if (mSmmMpSyncData->CpuData[Index].Status != NULL) {\r
+ *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;\r
+ }\r
+ }\r
+ } else {\r
+ //\r
+ // PI spec requirement:\r
+ // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.\r
+ //\r
+ if (CPUStatus != NULL) {\r
+ CPUStatus[Index] = EFI_NOT_STARTED;\r
+ }\r
+\r
+ //\r
+ // Decrease the count to mark this processor(AP or BSP) as finished.\r
+ //\r
+ if (ProcToken != NULL) {\r
+ WaitForSemaphore (&ProcToken->RunningApCount);\r
}\r
- NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
}\r
+ }\r
\r
- PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;\r
+ ReleaseAllAPs ();\r
+\r
+ if (Token == NULL) {\r
+ //\r
+ // Make sure all APs have completed their tasks.\r
+ //\r
+ WaitForAllAPsNotBusy (TRUE);\r
}\r
\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ ISO C99 6.5.2.2 "Function calls", paragraph 9:\r
+ If the function is defined with a type that is not compatible with\r
+ the type (of the expression) pointed to by the expression that\r
+ denotes the called function, the behavior is undefined.\r
+\r
+ So add below wrapper function to convert between EFI_AP_PROCEDURE\r
+ and EFI_AP_PROCEDURE2.\r
+\r
+ Wrapper for Procedures.\r
+\r
+ @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+ProcedureWrapper (\r
+ IN VOID *Buffer\r
+ )\r
+{\r
+ PROCEDURE_WRAPPER *Wrapper;\r
+\r
+ Wrapper = Buffer;\r
+ Wrapper->Procedure (Wrapper->ProcedureArgument);\r
\r
- PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));\r
- PageTable[PTIndex] |= (UINT64)Cacheability;\r
+ return EFI_SUCCESS;\r
}\r
\r
+/**\r
+ Schedule a procedure to run on the specified CPU in blocking mode.\r
+\r
+ @param[in] Procedure The address of the procedure to run\r
+ @param[in] CpuIndex Target CPU Index\r
+ @param[in, out] ProcArguments The parameter to pass to the procedure\r
+\r
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
+ @retval EFI_SUCCESS The procedure has been successfully scheduled\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmBlockingStartupThisAp (\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
+ )\r
+{\r
+ PROCEDURE_WRAPPER Wrapper;\r
+\r
+ Wrapper.Procedure = Procedure;\r
+ Wrapper.ProcedureArgument = ProcArguments;\r
+\r
+ //\r
+ // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
+ //\r
+ return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);\r
+}\r
\r
/**\r
Schedule a procedure to run on the specified CPU.\r
EFI_STATUS\r
EFIAPI\r
SmmStartupThisAp (\r
- IN EFI_AP_PROCEDURE Procedure,\r
- IN UINTN CpuIndex,\r
- IN OUT VOID *ProcArguments OPTIONAL\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
)\r
{\r
- if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||\r
- CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||\r
- !mSmmMpSyncData->CpuData[CpuIndex].Present ||\r
- gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||\r
- !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {\r
- return EFI_INVALID_PARAMETER;\r
+ gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
+ gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;\r
+\r
+ //\r
+ // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
+ //\r
+ return InternalSmmStartupThisAp (\r
+ ProcedureWrapper,\r
+ CpuIndex,\r
+ &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
+ FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,\r
+ 0,\r
+ NULL\r
+ );\r
+}\r
+\r
+/**\r
+ This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
+  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
+\r
+ NOTE: It might not be appreciated in runtime since it might\r
+ conflict with OS debugging facilities. Turn them off in RELEASE.\r
+\r
+ @param CpuIndex CPU Index\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+CpuSmmDebugEntry (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ AsmWriteDr6 (CpuSaveState->x86._DR6);\r
+ AsmWriteDr7 (CpuSaveState->x86._DR7);\r
+ } else {\r
+ AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
+ AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
+ }\r
}\r
+}\r
\r
- mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
- mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+/**\r
+ This function restores DR6 & DR7 to SMM save state.\r
+\r
+ NOTE: It might not be appreciated in runtime since it might\r
+ conflict with OS debugging facilities. Turn them off in RELEASE.\r
\r
- if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {\r
- AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ @param CpuIndex CPU Index\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+CpuSmmDebugExit (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
+ CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
+ } else {\r
+ CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
+ CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
+ }\r
}\r
- return EFI_SUCCESS;\r
}\r
\r
/**\r
VOID\r
EFIAPI\r
SmiRendezvous (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
- EFI_STATUS Status;\r
- BOOLEAN ValidSmi;\r
- BOOLEAN IsBsp;\r
- BOOLEAN BspInProgress;\r
- UINTN Index;\r
- UINTN Cr2;\r
+ EFI_STATUS Status;\r
+ BOOLEAN ValidSmi;\r
+ BOOLEAN IsBsp;\r
+ BOOLEAN BspInProgress;\r
+ UINTN Index;\r
+ UINTN Cr2;\r
+\r
+ ASSERT (CpuIndex < mMaxNumberOfCpus);\r
+\r
+ //\r
+ // Save Cr2 because Page Fault exception in SMM may override its value,\r
+ // when using on-demand paging for above 4G memory.\r
+ //\r
+ Cr2 = 0;\r
+ SaveCr2 (&Cr2);\r
\r
//\r
- // Save Cr2 because Page Fault exception in SMM may override its value\r
+ // Call the user register Startup function first.\r
//\r
- Cr2 = AsmReadCr2 ();\r
+ if (mSmmMpSyncData->StartupProcedure != NULL) {\r
+ mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);\r
+ }\r
\r
//\r
// Perform CPU specific entry hooks\r
//\r
// Determine if this is a valid SMI\r
//\r
- ValidSmi = PlatformValidSmi();\r
+ ValidSmi = PlatformValidSmi ();\r
\r
//\r
// Determine if BSP has been already in progress. Note this must be checked after\r
// ValidSmi because BSP may clear a valid SMI source after checking in.\r
//\r
- BspInProgress = mSmmMpSyncData->InsideSmm;\r
+ BspInProgress = *mSmmMpSyncData->InsideSmm;\r
\r
if (!BspInProgress && !ValidSmi) {\r
//\r
//\r
// Signal presence of this processor\r
//\r
- if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {\r
+ if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
//\r
// BSP has already ended the synchronization, so QUIT!!!\r
//\r
//\r
// Wait for BSP's signal to finish SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
+\r
goto Exit;\r
} else {\r
-\r
//\r
// The BUSY lock is initialized to Released state.\r
// This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
// E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
// after AP's present flag is detected.\r
//\r
- InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
- }\r
-\r
- //\r
- // Try to enable NX\r
- //\r
- if (mXdSupported) {\r
- ActivateXd ();\r
+ InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
}\r
\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
// Platform hook fails to determine, use default BSP election method\r
//\r
InterlockedCompareExchange32 (\r
- (UINT32*)&mSmmMpSyncData->BspIndex,\r
+ (UINT32 *)&mSmmMpSyncData->BspIndex,\r
(UINT32)-1,\r
(UINT32)CpuIndex\r
);\r
// "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
//\r
if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
-\r
//\r
// Clear last request for SwitchBsp.\r
//\r
// BSP Handler is always called with a ValidSmi == TRUE\r
//\r
BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
-\r
} else {\r
APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
}\r
}\r
\r
- ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
+ ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
\r
//\r
// Wait for BSP's signal to exit SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
}\r
\r
Exit:\r
SmmCpuFeaturesRendezvousExit (CpuIndex);\r
+\r
//\r
// Restore Cr2\r
//\r
- AsmWriteCr2 (Cr2);\r
+ RestoreCr2 (Cr2);\r
}\r
\r
+/**\r
+ Allocate buffer for SpinLock and Wrapper function buffer.\r
+\r
+**/\r
+VOID\r
+InitializeDataForMmMp (\r
+ VOID\r
+ )\r
+{\r
+ gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
+ ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);\r
+\r
+ InitializeListHead (&gSmmCpuPrivate->TokenList);\r
+\r
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
+}\r
+\r
+/**\r
+ Allocate buffer for all semaphores and spin locks.\r
+\r
+**/\r
+VOID\r
+InitializeSmmCpuSemaphores (\r
+ VOID\r
+ )\r
+{\r
+ UINTN ProcessorCount;\r
+ UINTN TotalSize;\r
+ UINTN GlobalSemaphoresSize;\r
+ UINTN CpuSemaphoresSize;\r
+ UINTN SemaphoreSize;\r
+ UINTN Pages;\r
+ UINTN *SemaphoreBlock;\r
+ UINTN SemaphoreAddr;\r
+\r
+ SemaphoreSize = GetSpinLockProperties ();\r
+ ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
+ CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
+ TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
+ DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
+ DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
+ Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
+ SemaphoreBlock = AllocatePages (Pages);\r
+ ASSERT (SemaphoreBlock != NULL);\r
+ ZeroMem (SemaphoreBlock, TotalSize);\r
+\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
+ = (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
+\r
+ mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
+ mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
+\r
+ mSemaphoreSize = SemaphoreSize;\r
+}\r
\r
/**\r
Initialize un-cacheable data.\r
VOID\r
)\r
{\r
+ UINTN CpuIndex;\r
+\r
if (mSmmMpSyncData != NULL) {\r
+ //\r
+ // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
+ // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
+ //\r
ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
- mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
+ mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
//\r
//\r
mSmmMpSyncData->BspIndex = (UINT32)-1;\r
}\r
- mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
+\r
+ mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
+\r
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
+ ASSERT (\r
+ mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
+ mSmmMpSyncData->AllCpusInSync != NULL\r
+ );\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
+\r
+ mSmmMpSyncData->AllApArrivedWithException = FALSE;\r
+\r
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {\r
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
+ (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Run =\r
+ (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Present =\r
+ (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
+ }\r
}\r
}\r
\r
/**\r
Initialize global data for MP synchronization.\r
\r
- @param Stacks Base address of SMI stack buffer for all processors.\r
- @param StackSize Stack size for each processor in SMM.\r
+ @param Stacks Base address of SMI stack buffer for all processors.\r
+ @param StackSize Stack size for each processor in SMM.\r
+ @param ShadowStackSize Shadow Stack size for each processor in SMM.\r
\r
**/\r
UINT32\r
InitializeMpServiceData (\r
- IN VOID *Stacks,\r
- IN UINTN StackSize\r
+ IN VOID *Stacks,\r
+ IN UINTN StackSize,\r
+ IN UINTN ShadowStackSize\r
)\r
{\r
- UINT32 Cr3;\r
- UINTN Index;\r
- MTRR_SETTINGS *Mtrr;\r
- PROCESSOR_SMM_DESCRIPTOR *Psd;\r
- UINT8 *GdtTssTables;\r
- UINTN GdtTableStepSize;\r
+ UINT32 Cr3;\r
+ UINTN Index;\r
+ UINT8 *GdtTssTables;\r
+ UINTN GdtTableStepSize;\r
+ CPUID_VERSION_INFO_EDX RegEdx;\r
+ UINT32 MaxExtendedFunction;\r
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;\r
+\r
+ //\r
+ // Determine if this CPU supports machine check\r
+ //\r
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
+ mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);\r
+\r
+ //\r
+ // Allocate memory for all locks and semaphores\r
+ //\r
+ InitializeSmmCpuSemaphores ();\r
+\r
+ //\r
+ // Initialize mSmmMpSyncData\r
+ //\r
+ mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
+ (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
+ ASSERT (mSmmMpSyncData != NULL);\r
+ mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
+ InitializeMpSyncData ();\r
\r
//\r
// Initialize physical address mask\r
// NOTE: Physical memory above virtual address limit is not supported !!!\r
//\r
- AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
- gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
- gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);\r
+ if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);\r
+ } else {\r
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;\r
+ }\r
+\r
+ gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;\r
+ //\r
+ // Clear the low 12 bits\r
+ //\r
+ gPhyMask &= 0xfffffffffffff000ULL;\r
\r
//\r
// Create page tables\r
GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
\r
//\r
- // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
+ // Install SMI handler for each CPU\r
//\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
- CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
-\r
- //\r
- // Install SMI handler\r
- //\r
InstallSmiHandler (\r
Index,\r
(UINT32)mCpuHotPlugData.SmBase[Index],\r
- (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
+ (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
StackSize,\r
- (UINTN)Psd->SmmGdtPtr,\r
- Psd->SmmGdtSize,\r
+ (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
+ gcSmiGdtr.Limit + 1,\r
gcSmiIdtr.Base,\r
gcSmiIdtr.Limit + 1,\r
Cr3\r
);\r
}\r
\r
- //\r
- // Initialize mSmmMpSyncData\r
- //\r
- mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
- (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
- mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
- ASSERT (mSmmMpSyncData != NULL);\r
- InitializeMpSyncData ();\r
-\r
//\r
// Record current MTRR settings\r
//\r
- ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));\r
- Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;\r
- MtrrGetAllMtrrs (Mtrr);\r
+ ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));\r
+ MtrrGetAllMtrrs (&gSmiMtrrs);\r
\r
return Cr3;\r
}\r
gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
return EFI_SUCCESS;\r
}\r
+\r
+/**\r
+\r
+ Register the SMM Foundation entry point.\r
+\r
+ @param[in] Procedure A pointer to the code stream to be run on the designated target AP\r
+ of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2\r
+ with the related definitions of\r
+ EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.\r
+                                    The caller may pass a value of NULL to deregister any existing\r
+ startup procedure.\r
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is\r
+ run by the AP. It is an optional common mailbox between APs and\r
+ the caller to share information\r
+\r
+ @retval EFI_SUCCESS The Procedure has been set successfully.\r
+ @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.\r
+\r
+**/\r
+EFI_STATUS\r
+RegisterStartupProcedure (\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN OUT VOID *ProcedureArguments OPTIONAL\r
+ )\r
+{\r
+ if ((Procedure == NULL) && (ProcedureArguments != NULL)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ if (mSmmMpSyncData == NULL) {\r
+ return EFI_NOT_READY;\r
+ }\r
+\r
+ mSmmMpSyncData->StartupProcedure = Procedure;\r
+ mSmmMpSyncData->StartupProcArgs = ProcedureArguments;\r
+\r
+ return EFI_SUCCESS;\r
+}\r