X-Git-Url: https://git.proxmox.com/?p=mirror_edk2.git;a=blobdiff_plain;f=UefiCpuPkg%2FPiSmmCpuDxeSmm%2FMpService.c;h=9b8db90ff6edb465787c972b250a0133aafc6dfe;hp=620b0136c5cfc2447442674de2f42f9214d8faf2;hb=717fb60443fbaedfab9a37fd186361b3b9e1ecfe;hpb=881520ea6778953c57d975ca2a9cf3f2114f99c4

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 620b0136c5..9b8db90ff6 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1,7 +1,7 @@
 /** @file
 SMM MP service implementation
 
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution.  The full text of the license may be found at
@@ -21,6 +21,9 @@ UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR
 UINT64                                      gPhyMask;
 SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
 UINTN                                       mSmmMpSyncDataSize;
+SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
+UINTN                                       mSemaphoreSize;
+SPIN_LOCK                                   *mPFLock = NULL;
 
 /**
   Performs an atomic compare exchange operation to get semaphore.
@@ -120,7 +123,7 @@ WaitForAllAPs (
 
   BspIndex = mSmmMpSyncData->BspIndex;
   while (NumberOfAPs-- > 0) {
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
   }
 }
 
@@ -139,8 +142,8 @@ ReleaseAllAPs (
 
   BspIndex = mSmmMpSyncData->BspIndex;
   for (Index = mMaxNumberOfCpus; Index-- > 0;) {
-    if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
-      ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
+    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
+      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
     }
   }
 }
@@ -163,16 +166,16 @@ AllCpusInSmmWithExceptions (
   SMM_CPU_DATA_BLOCK                *CpuData;
   EFI_PROCESSOR_INFORMATION         *ProcessorInfo;
 
-  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
 
-  if (mSmmMpSyncData->Counter == mNumberOfCpus) {
+  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
     return TRUE;
   }
 
   CpuData = mSmmMpSyncData->CpuData;
   ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
   for (Index = mMaxNumberOfCpus; Index-- > 0;) {
-    if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
       if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
         continue;
       }
@@ -204,7 +207,7 @@ SmmWaitForApArrival (
   UINT64                            Timer;
   UINTN                             Index;
 
-  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
 
   //
   // Platform implementor should choose a timeout value appropriately:
@@ -243,12 +246,12 @@ SmmWaitForApArrival (
   //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
   //    In both cases, adding SMI-disabling checking code increases overhead.
   //
-  if (mSmmMpSyncData->Counter < mNumberOfCpus) {
+  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
     //
     // Send SMI IPIs to bring outside processors in
     //
     for (Index = mMaxNumberOfCpus; Index-- > 0;) {
-      if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
         SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
       }
     }
@@ -320,7 +323,7 @@ BSPHandler (
   //
   // Flag BSP's presence
   //
-  mSmmMpSyncData->InsideSmm = TRUE;
+  *mSmmMpSyncData->InsideSmm = TRUE;
 
   //
   // Initialize Debug Agent to start source level debug in BSP handler
@@ -330,7 +333,7 @@ BSPHandler (
   //
   // Mark this processor's presence
   //
-  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
 
   //
   // Clear platform top level SMI status bit before calling SMI handlers. If
@@ -358,8 +361,8 @@ BSPHandler (
     //
     // Lock the counter down and retrieve the number of APs
     //
-    mSmmMpSyncData->AllCpusInSync = TRUE;
-    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+    *mSmmMpSyncData->AllCpusInSync = TRUE;
+    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
 
     //
     // Wait for all APs to get ready for programming MTRRs
@@ -409,7 +412,7 @@ BSPHandler (
   //
   // The BUSY lock is initialized to Acquired state
   //
-  AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+  AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);
 
   //
   // Perform the pre tasks
@@ -425,9 +428,9 @@ BSPHandler (
   // Make sure all APs have completed their pending none-block tasks
   //
   for (Index = mMaxNumberOfCpus; Index-- > 0;) {
-    if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
-      AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
-      ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);;
+    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
+      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
     }
   }
 
@@ -446,15 +449,15 @@ BSPHandler (
     //
     // Lock the counter down and retrieve the number of APs
     //
-    mSmmMpSyncData->AllCpusInSync = TRUE;
-    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+    *mSmmMpSyncData->AllCpusInSync = TRUE;
+    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
     //
     // Make sure all APs have their Present flag set
     //
     while (TRUE) {
       PresentCount = 0;
       for (Index = mMaxNumberOfCpus; Index-- > 0;) {
-        if (mSmmMpSyncData->CpuData[Index].Present) {
+        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
           PresentCount ++;
         }
       }
@@ -467,7 +470,7 @@ BSPHandler (
   //
   // Notify all APs to exit
   //
-  mSmmMpSyncData->InsideSmm = FALSE;
+  *mSmmMpSyncData->InsideSmm = FALSE;
   ReleaseAllAPs ();
 
   //
@@ -512,7 +515,7 @@ BSPHandler (
   //
   // Clear the Present flag of BSP
   //
-  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
 
   //
   // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
@@ -530,8 +533,8 @@ BSPHandler (
   //
   // Allow APs to check in from this point on
   //
-  mSmmMpSyncData->Counter = 0;
-  mSmmMpSyncData->AllCpusInSync = FALSE;
+  *mSmmMpSyncData->Counter = 0;
+  *mSmmMpSyncData->AllCpusInSync = FALSE;
 }
 
 /**
@@ -558,12 +561,12 @@ APHandler (
     //
     for (Timer = StartSyncTimer ();
          !IsSyncTimerTimeout (Timer) &&
-         !mSmmMpSyncData->InsideSmm;
+         !(*mSmmMpSyncData->InsideSmm);
          ) {
       CpuPause ();
     }
 
-    if (!mSmmMpSyncData->InsideSmm) {
+    if (!(*mSmmMpSyncData->InsideSmm)) {
       //
       // BSP timeout in the first round
       //
@@ -584,23 +587,23 @@ APHandler (
         //
         for (Timer = StartSyncTimer ();
              !IsSyncTimerTimeout (Timer) &&
-             !mSmmMpSyncData->InsideSmm;
+             !(*mSmmMpSyncData->InsideSmm);
             ) {
          CpuPause ();
        }
 
-        if (!mSmmMpSyncData->InsideSmm) {
+        if (!(*mSmmMpSyncData->InsideSmm)) {
          //
          // Give up since BSP is unable to enter SMM
          // and signal the completion of this AP
-          WaitForSemaphore (&mSmmMpSyncData->Counter);
+          WaitForSemaphore (mSmmMpSyncData->Counter);
          return;
        }
      } else {
        //
        // Don't know BSP index. Give up without sending IPI to BSP.
        //
-        WaitForSemaphore (&mSmmMpSyncData->Counter);
+        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    }
@@ -614,20 +617,20 @@ APHandler (
   //
   // Mark this processor's presence
   //
-  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
 
   if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
     //
     // Notify BSP of arrival at this point
     //
-    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
   }
 
   if (SmmCpuFeaturesNeedConfigureMtrrs()) {
     //
     // Wait for the signal from BSP to backup MTRRs
     //
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
     //
     // Backup OS MTRRs
@@ -637,12 +640,12 @@ APHandler (
     //
     // Signal BSP the completion of this AP
     //
-    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
 
     //
     // Wait for BSP's signal to program MTRRs
     //
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
     //
     // Replace OS MTRRs with SMI MTRRs
@@ -652,19 +655,19 @@ APHandler (
     //
     // Signal BSP the completion of this AP
     //
-    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
   }
 
   while (TRUE) {
     //
     // Wait for something to happen
    //
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
     //
     // Check if BSP wants to exit SMM
     //
-    if (!mSmmMpSyncData->InsideSmm) {
+    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
     }
 
@@ -672,7 +675,7 @@ APHandler (
     // BUSY should be acquired by SmmStartupThisAp()
     //
     ASSERT (
-      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
+      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
       );
 
     //
@@ -685,19 +688,19 @@ APHandler (
     //
     // Release BUSY
     //
-    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
   }
 
   if (SmmCpuFeaturesNeedConfigureMtrrs()) {
     //
     // Notify BSP the readiness of this AP to program MTRRs
     //
-    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
 
     //
     // Wait for the signal from BSP to program MTRRs
     //
-    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
     //
     // Restore OS MTRRs
@@ -709,36 +712,34 @@ APHandler (
   //
   // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
   //
-  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
 
   //
   // Wait for the signal from BSP to Reset states/semaphore for this processor
   //
-  WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
   //
   // Reset states/semaphore for this processor
   //
-  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
 
   //
   // Notify BSP the readiness of this AP to exit SMM
   //
-  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
 }
 
 /**
   Create 4G PageTable in SMRAM.
 
-  @param          ExtraPages       Additional page numbers besides for 4G memory
-  @param          Is32BitPageTable Whether the page table is 32-bit PAE
+  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
 
   @return         PageTable Address
 
 **/
 UINT32
 Gen4GPageTable (
-  IN      UINTN                     ExtraPages,
   IN      BOOLEAN                   Is32BitPageTable
   )
 {
@@ -772,10 +773,10 @@ Gen4GPageTable (
   //
   // Allocate the page table
   //
-  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
+  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
   ASSERT (PageTable != NULL);
 
-  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
+  PageTable = (VOID *)((UINTN)PageTable);
   Pte = (UINT64*)PageTable;
 
   //
@@ -900,13 +901,13 @@ SetCacheability (
   PageTable[PTIndex] |= (UINT64)Cacheability;
 }
 
-
 /**
   Schedule a procedure to run on the specified CPU.
 
-  @param  Procedure                The address of the procedure to run
-  @param  CpuIndex                 Target CPU Index
-  @param  ProcArguments            The parameter to pass to the procedure
+  @param[in]       Procedure       The address of the procedure to run
+  @param[in]       CpuIndex        Target CPU Index
+  @param[in, OUT]  ProcArguments   The parameter to pass to the procedure
+  @param[in]       BlockingMode    Startup AP in blocking mode or not
 
   @retval EFI_INVALID_PARAMETER    CpuNumber not valid
   @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
@@ -916,34 +917,106 @@ SetCacheability (
 
 **/
 EFI_STATUS
-EFIAPI
-SmmStartupThisAp (
+InternalSmmStartupThisAp (
   IN      EFI_AP_PROCEDURE          Procedure,
   IN      UINTN                     CpuIndex,
-  IN OUT  VOID                      *ProcArguments OPTIONAL
+  IN OUT  VOID                      *ProcArguments OPTIONAL,
+  IN      BOOLEAN                   BlockingMode
   )
 {
-  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
-      CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
-      !mSmmMpSyncData->CpuData[CpuIndex].Present ||
-      gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
-      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
+    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+    return EFI_INVALID_PARAMETER;
+  }
+  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
+    return EFI_INVALID_PARAMETER;
+  }
+  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
+      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
+    }
+    return EFI_INVALID_PARAMETER;
+  }
+  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
+    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
+    }
     return EFI_INVALID_PARAMETER;
   }
+  if (BlockingMode) {
+    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+  } else {
+    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
+      return EFI_INVALID_PARAMETER;
+    }
+  }
+
   mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
   mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
-  ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
 
-  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
-    AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
-    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+  if (BlockingMode) {
+    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
   }
 
   return EFI_SUCCESS;
 }
 
 /**
-  This funciton sets DR6 & DR7 according to SMM save state, before running SMM C code.
+  Schedule a procedure to run on the specified CPU in blocking mode.
+
+  @param[in]       Procedure       The address of the procedure to run
+  @param[in]       CpuIndex        Target CPU Index
+  @param[in, out]  ProcArguments   The parameter to pass to the procedure
+
+  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
+  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
+  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
+  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
+  @retval EFI_SUCCESS              The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+  IN      EFI_AP_PROCEDURE          Procedure,
+  IN      UINTN                     CpuIndex,
+  IN OUT  VOID                      *ProcArguments OPTIONAL
+  )
+{
+  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
+}
+
+/**
+  Schedule a procedure to run on the specified CPU.
+
+  @param  Procedure                The address of the procedure to run
+  @param  CpuIndex                 Target CPU Index
+  @param  ProcArguments            The parameter to pass to the procedure
+
+  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
+  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
+  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
+  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
+  @retval EFI_SUCCESS              The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+  IN      EFI_AP_PROCEDURE          Procedure,
+  IN      UINTN                     CpuIndex,
+  IN OUT  VOID                      *ProcArguments OPTIONAL
+  )
+{
+  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
+}
+
+/**
+  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
   They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
 
   NOTE: It might not be appreciated in runtime since it might
@@ -961,7 +1034,8 @@ CpuSmmDebugEntry (
   SMRAM_SAVE_STATE_MAP *CpuSaveState;
 
   if (FeaturePcdGet (PcdCpuSmmDebug)) {
-    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];
+    ASSERT(CpuIndex < mMaxNumberOfCpus);
+    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
     if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
       AsmWriteDr6 (CpuSaveState->x86._DR6);
       AsmWriteDr7 (CpuSaveState->x86._DR7);
@@ -973,7 +1047,7 @@
 }
 
 /**
-  This funciton restores DR6 & DR7 to SMM save state.
+  This function restores DR6 & DR7 to SMM save state.
 
   NOTE: It might not be appreciated in runtime since it might
         conflict with OS debugging facilities. Turn them off in RELEASE.
@@ -990,7 +1064,8 @@ CpuSmmDebugExit (
   SMRAM_SAVE_STATE_MAP *CpuSaveState;
 
   if (FeaturePcdGet (PcdCpuSmmDebug)) {
-    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];
+    ASSERT(CpuIndex < mMaxNumberOfCpus);
+    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
     if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
       CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
       CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
@@ -1013,12 +1088,14 @@ SmiRendezvous (
   IN      UINTN                     CpuIndex
   )
 {
-  EFI_STATUS        Status;
-  BOOLEAN           ValidSmi;
-  BOOLEAN           IsBsp;
-  BOOLEAN           BspInProgress;
-  UINTN             Index;
-  UINTN             Cr2;
+  EFI_STATUS                     Status;
+  BOOLEAN                        ValidSmi;
+  BOOLEAN                        IsBsp;
+  BOOLEAN                        BspInProgress;
+  UINTN                          Index;
+  UINTN                          Cr2;
+
+  ASSERT(CpuIndex < mMaxNumberOfCpus);
 
   //
   // Save Cr2 because Page Fault exception in SMM may override its value
@@ -1039,7 +1116,7 @@ SmiRendezvous (
   // Determine if BSP has been already in progress. Note this must be checked after
   // ValidSmi because BSP may clear a valid SMI source after checking in.
   //
-  BspInProgress = mSmmMpSyncData->InsideSmm;
+  BspInProgress = *mSmmMpSyncData->InsideSmm;
 
   if (!BspInProgress && !ValidSmi) {
     //
@@ -1054,7 +1131,7 @@ SmiRendezvous (
     //
     // Signal presence of this processor
     //
-    if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
+    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
       //
       // BSP has already ended the synchronization, so QUIT!!!
       //
@@ -1062,7 +1139,7 @@ SmiRendezvous (
       //
       // Wait for BSP's signal to finish SMI
      //
-      while (mSmmMpSyncData->AllCpusInSync) {
+      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
@@ -1074,14 +1151,7 @@ SmiRendezvous (
       // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
       // after AP's present flag is detected.
       //
-      InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
-    }
-
-    //
-    // Try to enable NX
-    //
-    if (mXdSupported) {
-      ActivateXd ();
+      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
     }
 
     if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
@@ -1152,18 +1222,17 @@ SmiRendezvous (
       // BSP Handler is always called with a ValidSmi == TRUE
       //
       BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
-
     } else {
       APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
     }
   }
 
-  ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
+  ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
 
   //
   // Wait for BSP's signal to exit SMI
   //
-  while (mSmmMpSyncData->AllCpusInSync) {
+  while (*mSmmMpSyncData->AllCpusInSync) {
     CpuPause ();
   }
 }
@@ -1176,6 +1245,72 @@ Exit:
   AsmWriteCr2 (Cr2);
 }
 
+/**
+  Allocate buffer for all semaphores and spin locks.
+
+**/
+VOID
+InitializeSmmCpuSemaphores (
+  VOID
+  )
+{
+  UINTN                      ProcessorCount;
+  UINTN                      TotalSize;
+  UINTN                      GlobalSemaphoresSize;
+  UINTN                      CpuSemaphoresSize;
+  UINTN                      MsrSemahporeSize;
+  UINTN                      SemaphoreSize;
+  UINTN                      Pages;
+  UINTN                      *SemaphoreBlock;
+  UINTN                      SemaphoreAddr;
+
+  SemaphoreSize   = GetSpinLockProperties ();
+  ProcessorCount  = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
+  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
+  MsrSemahporeSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
+  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemahporeSize;
+  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
+  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
+  Pages = EFI_SIZE_TO_PAGES (TotalSize);
+  SemaphoreBlock = AllocatePages (Pages);
+  ASSERT (SemaphoreBlock != NULL);
+  ZeroMem (SemaphoreBlock, TotalSize);
+
+  SemaphoreAddr = (UINTN)SemaphoreBlock;
+  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
+  SemaphoreAddr += SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
+  SemaphoreAddr += SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
+  SemaphoreAddr += SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
+  SemaphoreAddr += SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
+                                                  = (SPIN_LOCK *)SemaphoreAddr;
+  SemaphoreAddr += SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
+                                                  = (SPIN_LOCK *)SemaphoreAddr;
+
+  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
+  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
+  SemaphoreAddr += ProcessorCount * SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
+  SemaphoreAddr += ProcessorCount * SemaphoreSize;
+  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
+
+  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
+  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
+  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
+        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
+  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
+
+  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
+  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
+  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
+
+  mSemaphoreSize = SemaphoreSize;
+}
 
 /**
   Initialize un-cacheable data.
 
@@ -1187,7 +1322,13 @@ InitializeMpSyncData (
   VOID
   )
 {
+  UINTN                      CpuIndex;
+
   if (mSmmMpSyncData != NULL) {
+    //
+    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
+    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
+    //
     ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
     mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
     mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
@@ -1198,6 +1339,24 @@ InitializeMpSyncData (
       mSmmMpSyncData->BspIndex = (UINT32)-1;
     }
     mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
+
+    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+            mSmmMpSyncData->AllCpusInSync != NULL);
+    *mSmmMpSyncData->Counter       = 0;
+    *mSmmMpSyncData->InsideSmm     = FALSE;
+    *mSmmMpSyncData->AllCpusInSync = FALSE;
+
+    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
+      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
+        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
+      mSmmMpSyncData->CpuData[CpuIndex].Run     =
+        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
+      mSmmMpSyncData->CpuData[CpuIndex].Present =
+        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
+    }
   }
 }
 
@@ -1221,6 +1380,20 @@ InitializeMpServiceData (
   UINT8                     *GdtTssTables;
   UINTN                     GdtTableStepSize;
 
+  //
+  // Allocate memory for all locks and semaphores
+  //
+  InitializeSmmCpuSemaphores ();
+
+  //
+  // Initialize mSmmMpSyncData
+  //
+  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
+                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
+  ASSERT (mSmmMpSyncData != NULL);
+  InitializeMpSyncData ();
+
   //
   // Initialize physical address mask
   // NOTE: Physical memory above virtual address limit is not supported !!!
@@ -1261,15 +1434,6 @@ InitializeMpServiceData (
     );
   }
 
-  //
-  // Initialize mSmmMpSyncData
-  //
-  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
-                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
-  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
-  ASSERT (mSmmMpSyncData != NULL);
-  InitializeMpSyncData ();
-
   //
   // Record current MTRR settings
   //
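
Editor's note: the hunks above replace the Busy/Run/Present fields and the global Counter/InsideSmm/AllCpusInSync flags with pointers into a single buffer in which every semaphore occupies its own slot of GetSpinLockProperties() bytes, globals first and then the per-CPU arrays. Below is a minimal, standalone sketch of that carving pattern, not edk2 code: the fixed 64-byte slot size, the type choices, and the use of aligned_alloc are illustrative assumptions made so the demo builds as hosted C.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE    64u   /* assumed cache-line/slot size; stands in for GetSpinLockProperties() */
#define GLOBAL_SEMS  3u    /* Counter, InsideSmm, AllCpusInSync in the real driver */
#define PER_CPU_SEMS 3u    /* Busy, Run, Present in the real driver */

typedef struct {
  volatile uint32_t *Counter;        /* global arrival counter                              */
  volatile uint8_t  *InsideSmm;      /* global "BSP is inside SMM" flag                     */
  volatile uint8_t  *AllCpusInSync;  /* global rendezvous flag                              */
  volatile uint32_t *Run;            /* first per-CPU Run slot; slots are SLOT_SIZE apart   */
  volatile uint8_t  *Present;        /* first per-CPU Present slot                          */
} SEM_LAYOUT;

/* Carve one aligned block into per-semaphore slots so no two CPUs spin on the same cache line. */
static int LayoutSemaphores (SEM_LAYOUT *Layout, unsigned CpuCount)
{
  size_t   GlobalSize = GLOBAL_SEMS * SLOT_SIZE;
  size_t   CpuSize    = (size_t)PER_CPU_SEMS * CpuCount * SLOT_SIZE;
  uint8_t *Block      = aligned_alloc (SLOT_SIZE, GlobalSize + CpuSize);

  if (Block == NULL) {
    return -1;
  }
  memset (Block, 0, GlobalSize + CpuSize);

  /* Global semaphores first, one slot each. */
  Layout->Counter       = (volatile uint32_t *)(Block + 0 * SLOT_SIZE);
  Layout->InsideSmm     = (volatile uint8_t  *)(Block + 1 * SLOT_SIZE);
  Layout->AllCpusInSync = (volatile uint8_t  *)(Block + 2 * SLOT_SIZE);

  /* Per-CPU arrays follow; CPU i uses offset i * SLOT_SIZE inside each array. */
  Layout->Run     = (volatile uint32_t *)(Block + GlobalSize);
  Layout->Present = (volatile uint8_t  *)(Block + GlobalSize + (size_t)CpuCount * SLOT_SIZE);
  return 0;  /* Block is intentionally never freed in this demo. */
}

int main (void)
{
  SEM_LAYOUT Layout;

  if (LayoutSemaphores (&Layout, 4) != 0) {
    return 1;
  }
  /* CPU 2's Run semaphore lives in its own slot/cache line. */
  printf ("CPU2 Run slot offset: %u bytes\n",
          (unsigned)(((uintptr_t)Layout.Run + 2 * SLOT_SIZE) - (uintptr_t)Layout.Counter));
  return 0;
}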