UINT64 gPhyMask;\r
SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
UINTN mSmmMpSyncDataSize;\r
+SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
+UINTN mSemaphoreSize;\r
+SPIN_LOCK *mPFLock = NULL;\r
\r
/**\r
Performs an atomic compare exchange operation to get semaphore.\r
SMM_CPU_DATA_BLOCK *CpuData;\r
EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
- if (mSmmMpSyncData->Counter == mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
return TRUE;\r
}\r
\r
UINT64 Timer;\r
UINTN Index;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
//\r
// Platform implementor should choose a timeout value appropriately:\r
// - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
// In both cases, adding SMI-disabling checking code increases overhead.\r
//\r
- if (mSmmMpSyncData->Counter < mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
//\r
// Send SMI IPIs to bring outside processors in\r
//\r
//\r
// Flag BSP's presence\r
//\r
- mSmmMpSyncData->InsideSmm = TRUE;\r
+ *mSmmMpSyncData->InsideSmm = TRUE;\r
\r
//\r
// Initialize Debug Agent to start source level debug in BSP handler\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
\r
//\r
// Wait for all APs to get ready for programming MTRRs\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
//\r
// Make sure all APs have their Present flag set\r
//\r
//\r
// Notify all APs to exit\r
//\r
- mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
ReleaseAllAPs ();\r
\r
//\r
//\r
// Allow APs to check in from this point on\r
//\r
- mSmmMpSyncData->Counter = 0;\r
- mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
}\r
\r
/**\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// BSP timeout in the first round\r
//\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// Give up since BSP is unable to enter SMM\r
// and signal the completion of this AP\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
} else {\r
//\r
// Don't know BSP index. Give up without sending IPI to BSP.\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
}\r
//\r
// Check if BSP wants to exit SMM\r
//\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
break;\r
}\r
\r
// Determine if BSP has been already in progress. Note this must be checked after\r
// ValidSmi because BSP may clear a valid SMI source after checking in.\r
//\r
- BspInProgress = mSmmMpSyncData->InsideSmm;\r
+ BspInProgress = *mSmmMpSyncData->InsideSmm;\r
\r
if (!BspInProgress && !ValidSmi) {\r
//\r
//\r
// Signal presence of this processor\r
//\r
- if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {\r
+ if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
//\r
// BSP has already ended the synchronization, so QUIT!!!\r
//\r
//\r
// Wait for BSP's signal to finish SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
goto Exit;\r
//\r
// Wait for BSP's signal to exit SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
\r
AsmWriteCr2 (Cr2);\r
}\r
\r
+/**
+ Allocate buffer for all semaphores and spin locks.
+
+ Carves a single page-aligned allocation into fixed-stride slots (one
+ slot per global semaphore/lock, stride returned by GetSpinLockProperties)
+ and publishes each slot's address into mSmmCpuSemaphores, then into the
+ pointer members of mSmmMpSyncData and the module-level lock pointers.
+ Using one slot per semaphore keeps each semaphore in its own
+ cache-line-sized region, avoiding false sharing between CPUs.
+
+ NOTE(review): callers must have allocated mSmmMpSyncData before invoking
+ this function — the tail of this routine dereferences it.
+
+**/
+VOID
+InitializeSmmCpuSemaphores (
+ VOID
+ )
+{
+ UINTN ProcessorCount;
+ UINTN TotalSize;
+ UINTN GlobalSemaphoresSize;
+ UINTN SemaphoreSize;
+ UINTN Pages;
+ UINTN *SemaphoreBlock;
+ UINTN SemaphoreAddr;
+
+ // Stride for one semaphore slot; GetSpinLockProperties () reports the
+ // platform's required spin-lock alignment/size (typically a cache line).
+ SemaphoreSize = GetSpinLockProperties ();
+ // NOTE(review): ProcessorCount is assigned but not used in the size
+ // computation below — presumably reserved for per-CPU semaphores added
+ // later; confirm against the rest of the module.
+ ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ // One slot for each pointer-sized member of SMM_CPU_SEMAPHORE_GLOBAL.
+ GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
+ TotalSize = GlobalSemaphoresSize;
+ DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
+ DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
+ Pages = EFI_SIZE_TO_PAGES (TotalSize);
+ SemaphoreBlock = AllocatePages (Pages);
+ ASSERT (SemaphoreBlock != NULL);
+ // Zero the whole block so every semaphore/lock starts in the released state.
+ ZeroMem (SemaphoreBlock, TotalSize);
+
+ // Hand out one SemaphoreSize-stride slot per global semaphore/lock.
+ SemaphoreAddr = (UINTN)SemaphoreBlock;
+ mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
+ = (SPIN_LOCK *)SemaphoreAddr;
+
+ // Publish the slot addresses through the pointer members the MP
+ // synchronization code dereferences (e.g. *mSmmMpSyncData->Counter).
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+ mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
+ mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
+
+ // Remember the per-semaphore stride for any later consumers.
+ mSemaphoreSize = SemaphoreSize;
+}
\r
/**\r
Initialize un-cacheable data.\r
mSmmMpSyncData->BspIndex = (UINT32)-1;\r
}\r
mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
+\r
+ InitializeSmmCpuSemaphores ();\r
}\r
}\r
\r
UINT8 *GdtTssTables;\r
UINTN GdtTableStepSize;\r
\r
+ //\r
+ // Initialize mSmmMpSyncData\r
+ //\r
+ mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
+ (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
+ ASSERT (mSmmMpSyncData != NULL);\r
+ InitializeMpSyncData ();\r
+\r
//\r
// Initialize physical address mask\r
// NOTE: Physical memory above virtual address limit is not supported !!!\r
);\r
}\r
\r
- //\r
- // Initialize mSmmMpSyncData\r
- //\r
- mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
- (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
- mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
- ASSERT (mSmmMpSyncData != NULL);\r
- InitializeMpSyncData ();\r
-\r
//\r
// Record current MTRR settings\r
//\r