//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
-UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
+MTRR_SETTINGS gSmiMtrrs;
UINT64 gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
UINTN mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
UINTN mSemaphoreSize;
+//
+// Page Fault spin lock; points into the semaphore block carved out in
+// InitializeSmmCpuSemaphores() (SemaphoreGlobal.PFLock).
+//
+SPIN_LOCK *mPFLock = NULL;
+//
+// Cached value of PcdCpuSmmSyncMode, read once during init and used by
+// InitializeMpSyncData() to set EffectiveSyncMode.
+//
+SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
\r
/**\r
Performs an atomic compare exchange operation to get semaphore.\r
\r
BspIndex = mSmmMpSyncData->BspIndex;\r
while (NumberOfAPs-- > 0) {\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
}\r
\r
\r
BspIndex = mSmmMpSyncData->BspIndex;\r
for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);\r
+ if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
}\r
}\r
}\r
SMM_CPU_DATA_BLOCK *CpuData;\r
EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
- if (mSmmMpSyncData->Counter == mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
return TRUE;\r
}\r
\r
CpuData = mSmmMpSyncData->CpuData;\r
ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
+ if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
continue;\r
}\r
UINT64 Timer;\r
UINTN Index;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
//\r
// Platform implementor should choose a timeout value appropriately:\r
// - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
// In both cases, adding SMI-disabling checking code increases overhead.\r
//\r
- if (mSmmMpSyncData->Counter < mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
//\r
// Send SMI IPIs to bring outside processors in\r
//\r
for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
+ if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
}\r
}\r
IN UINTN CpuIndex\r
)\r
{\r
- PROCESSOR_SMM_DESCRIPTOR *Psd;\r
- UINT64 *SmiMtrrs;\r
- MTRR_SETTINGS *BiosMtrr;\r
-\r
- Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);\r
- SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;\r
-\r
SmmCpuFeaturesDisableSmrr ();\r
\r
//\r
// Replace all MTRRs registers\r
//\r
- BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;\r
- MtrrSetAllMtrrs(BiosMtrr);\r
+ MtrrSetAllMtrrs (&gSmiMtrrs);\r
}\r
\r
/**\r
//\r
// Flag BSP's presence\r
//\r
- mSmmMpSyncData->InsideSmm = TRUE;\r
+ *mSmmMpSyncData->InsideSmm = TRUE;\r
\r
//\r
// Initialize Debug Agent to start source level debug in BSP handler\r
//\r
// Mark this processor's presence\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
\r
//\r
// Clear platform top level SMI status bit before calling SMI handlers. If\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
\r
//\r
// Wait for all APs to get ready for programming MTRRs\r
//\r
// The BUSY lock is initialized to Acquired state\r
//\r
- AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
\r
//\r
// Perform the pre tasks\r
// Make sure all APs have completed their pending non-blocking tasks
//\r
for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
- AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);\r
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);;\r
+ if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
}\r
}\r
\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
//\r
// Make sure all APs have their Present flag set\r
//\r
while (TRUE) {\r
PresentCount = 0;\r
for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
- if (mSmmMpSyncData->CpuData[Index].Present) {\r
+ if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
PresentCount ++;\r
}\r
}\r
//\r
// Notify all APs to exit\r
//\r
- mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
ReleaseAllAPs ();\r
\r
//\r
//\r
// Clear the Present flag of BSP\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
\r
//\r
// Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
//\r
// Allow APs to check in from this point on\r
//\r
- mSmmMpSyncData->Counter = 0;\r
- mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
}\r
\r
/**\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// BSP timeout in the first round\r
//\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// Give up since BSP is unable to enter SMM\r
// and signal the completion of this AP\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
} else {\r
//\r
// Don't know BSP index. Give up without sending IPI to BSP.\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
}\r
//\r
// Mark this processor's presence\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
\r
if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
//\r
// Notify BSP of arrival at this point\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
//\r
// Wait for the signal from BSP to backup MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Backup OS MTRRs\r
//\r
// Signal BSP the completion of this AP\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for BSP's signal to program MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Replace OS MTRRs with SMI MTRRs\r
//\r
// Signal BSP the completion of this AP\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
}\r
\r
while (TRUE) {\r
//\r
// Wait for something to happen\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Check if BSP wants to exit SMM\r
//\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
break;\r
}\r
\r
// BUSY should be acquired by SmmStartupThisAp()\r
//\r
ASSERT (\r
- !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
+ !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
);\r
\r
//\r
//\r
// Release BUSY\r
//\r
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
}\r
\r
if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
//\r
// Notify BSP the readiness of this AP to program MTRRs\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for the signal from BSP to program MTRRs\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Restore OS MTRRs\r
//\r
// Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
//\r
// Wait for the signal from BSP to Reset states/semaphore for this processor\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
\r
//\r
// Reset states/semaphore for this processor\r
//\r
- mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
\r
//\r
// Notify BSP the readiness of this AP to exit SMM\r
//\r
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
\r
}\r
\r
/**\r
Create 4G PageTable in SMRAM.\r
\r
- @param ExtraPages Additional page numbers besides for 4G memory\r
- @param Is32BitPageTable Whether the page table is 32-bit PAE\r
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE\r
@return PageTable Address\r
\r
**/\r
UINT32\r
Gen4GPageTable (\r
- IN UINTN ExtraPages,\r
IN BOOLEAN Is32BitPageTable\r
)\r
{\r
//\r
// Allocate the page table\r
//\r
- PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);\r
+ PageTable = AllocatePageTableMemory (5 + PagesNeeded);\r
ASSERT (PageTable != NULL);\r
\r
- PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));\r
+ PageTable = (VOID *)((UINTN)PageTable);\r
Pte = (UINT64*)PageTable;\r
\r
//\r
PageTable[PTIndex] |= (UINT64)Cacheability;\r
}\r
\r
-\r
/**\r
Schedule a procedure to run on the specified CPU.\r
\r
- @param Procedure The address of the procedure to run\r
- @param CpuIndex Target CPU Index\r
- @param ProcArguments The parameter to pass to the procedure\r
+ @param[in] Procedure The address of the procedure to run\r
+ @param[in] CpuIndex Target CPU Index\r
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+ @param[in] BlockingMode Startup AP in blocking mode or not\r
\r
@retval EFI_INVALID_PARAMETER CpuNumber not valid\r
@retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
\r
**/\r
EFI_STATUS
-EFIAPI
-SmmStartupThisAp (
+InternalSmmStartupThisAp (
IN EFI_AP_PROCEDURE Procedure,
IN UINTN CpuIndex,
- IN OUT VOID *ProcArguments OPTIONAL
+ IN OUT VOID *ProcArguments OPTIONAL,
+ IN BOOLEAN BlockingMode
)
{
- if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
- CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
- !mSmmMpSyncData->CpuData[CpuIndex].Present ||
- gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
- !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ //
+ // Validate the request: CpuIndex must be in range, must not be the BSP
+ // (the currently executing CPU), the AP must be present in SMM, and it
+ // must not be marked for hot-remove.
+ //
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ return EFI_INVALID_PARAMETER;
+ }
+ if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
+ return EFI_INVALID_PARAMETER;
+ }
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+ if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
+ DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
+ }
+ return EFI_INVALID_PARAMETER;
+ }
+ if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
+ }
return EFI_INVALID_PARAMETER;
}

+ //
+ // Acquire the target AP's BUSY lock. In blocking mode wait for it;
+ // otherwise fail the call if the AP is still busy with a prior request.
+ //
+ if (BlockingMode) {
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ } else {
+ if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
+ return EFI_INVALID_PARAMETER;
+ }
+ }
+
mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
+ //
+ // Signal the AP's Run semaphore so it picks up and executes Procedure.
+ //
- ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

- if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
- AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
- ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ //
+ // In blocking mode, wait until the AP releases BUSY (i.e. the procedure
+ // has completed) before returning to the caller.
+ //
+ if (BlockingMode) {
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
}
return EFI_SUCCESS;
}
\r
+/**
+ Schedule a procedure to run on the specified CPU in blocking mode.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ //
+ // Always wait for the AP to finish the procedure (BlockingMode = TRUE).
+ //
+ return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
+}
+\r
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ //
+ // Whether this call blocks is governed by the PcdCpuSmmBlockStartupThisAp
+ // feature PCD.
+ //
+ return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
+}
+\r
/**\r
This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.\r
SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ ASSERT(CpuIndex < mMaxNumberOfCpus);\r
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
AsmWriteDr6 (CpuSaveState->x86._DR6);\r
SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ ASSERT(CpuIndex < mMaxNumberOfCpus);\r
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
BOOLEAN BspInProgress;\r
UINTN Index;\r
UINTN Cr2;\r
- BOOLEAN XdDisableFlag;\r
- MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;\r
+\r
+ ASSERT(CpuIndex < mMaxNumberOfCpus);\r
\r
//\r
// Save Cr2 because Page Fault exception in SMM may override its value\r
// Determine if BSP has been already in progress. Note this must be checked after\r
// ValidSmi because BSP may clear a valid SMI source after checking in.\r
//\r
- BspInProgress = mSmmMpSyncData->InsideSmm;\r
+ BspInProgress = *mSmmMpSyncData->InsideSmm;\r
\r
if (!BspInProgress && !ValidSmi) {\r
//\r
//\r
// Signal presence of this processor\r
//\r
- if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {\r
+ if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
//\r
// BSP has already ended the synchronization, so QUIT!!!\r
//\r
//\r
// Wait for BSP's signal to finish SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
goto Exit;\r
// E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
// after AP's present flag is detected.\r
//\r
- InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
- }\r
-\r
- //\r
- // Try to enable XD\r
- //\r
- XdDisableFlag = FALSE;\r
- if (mXdSupported) {\r
- MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
- if (MiscEnableMsr.Bits.XD == 1) {\r
- XdDisableFlag = TRUE;\r
- MiscEnableMsr.Bits.XD = 0;\r
- AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
- }\r
- ActivateXd ();\r
+ InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
}\r
\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
}\r
}\r
\r
- ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
+ ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
\r
//\r
// Wait for BSP's signal to exit SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
- }\r
-\r
- //\r
- // Restore XD\r
- //\r
- if (XdDisableFlag) {\r
- MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
- MiscEnableMsr.Bits.XD = 1;\r
- AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
}\r
}\r
\r
UINTN ProcessorCount;\r
UINTN TotalSize;\r
UINTN GlobalSemaphoresSize;\r
+ UINTN CpuSemaphoresSize;\r
+ UINTN MsrSemahporeSize;\r
UINTN SemaphoreSize;\r
UINTN Pages;\r
UINTN *SemaphoreBlock;\r
SemaphoreSize = GetSpinLockProperties ();\r
ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
- TotalSize = GlobalSemaphoresSize;\r
+ CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
+ MsrSemahporeSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;\r
+ TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemahporeSize;\r
DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
= (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock\r
+ = (SPIN_LOCK *)SemaphoreAddr;\r
+\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
+\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;\r
+ mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;\r
+ mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =\r
+ ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;\r
+ ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);\r
+\r
+ mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
+ mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
+ mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;\r
\r
mSemaphoreSize = SemaphoreSize;\r
}\r
VOID\r
)\r
{\r
+ UINTN CpuIndex;\r
+\r
if (mSmmMpSyncData != NULL) {\r
+ //\r
+ // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
+ // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
+ //\r
ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
//\r
mSmmMpSyncData->BspIndex = (UINT32)-1;\r
}\r
- mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
-\r
- InitializeSmmCpuSemaphores ();\r
+ mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
+\r
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
+ ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
+ mSmmMpSyncData->AllCpusInSync != NULL);\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
+\r
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
+ (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Run =\r
+ (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
+ mSmmMpSyncData->CpuData[CpuIndex].Present =\r
+ (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;\r
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
+ }\r
}\r
}\r
\r
{\r
UINT32 Cr3;\r
UINTN Index;\r
- MTRR_SETTINGS *Mtrr;\r
- PROCESSOR_SMM_DESCRIPTOR *Psd;\r
UINT8 *GdtTssTables;\r
UINTN GdtTableStepSize;\r
\r
+ //\r
+ // Allocate memory for all locks and semaphores\r
+ //\r
+ InitializeSmmCpuSemaphores ();\r
+\r
//\r
// Initialize mSmmMpSyncData\r
//\r
(sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
ASSERT (mSmmMpSyncData != NULL);\r
+ mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
InitializeMpSyncData ();\r
\r
//\r
GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
\r
//\r
- // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
+ // Install SMI handler for each CPU\r
//\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
- CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
-\r
- //\r
- // Install SMI handler\r
- //\r
InstallSmiHandler (\r
Index,\r
(UINT32)mCpuHotPlugData.SmBase[Index],\r
(VOID*)((UINTN)Stacks + (StackSize * Index)),\r
StackSize,\r
- (UINTN)Psd->SmmGdtPtr,\r
- Psd->SmmGdtSize,\r
+ (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
+ gcSmiGdtr.Limit + 1,\r
gcSmiIdtr.Base,\r
gcSmiIdtr.Limit + 1,\r
Cr3\r
//\r
// Record current MTRR settings\r
//\r
- ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));\r
- Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;\r
- MtrrGetAllMtrrs (Mtrr);\r
+ ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));\r
+ MtrrGetAllMtrrs (&gSmiMtrrs);\r
\r
return Cr3;\r
}\r