/** @file
SMM MP service implementation

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;

SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}
/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}
/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
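
//
// Usage sketch (illustrative, not part of the driver): the three primitives
// above implement a counting semaphore on a bare UINT32. Assuming a shared
// counter that starts at 0:
//
//   volatile UINT32  Counter = 0;
//
//   ReleaseSemaphore (&Counter);    // 0 -> 1: check in / signal
//   WaitForSemaphore (&Counter);    // spins until Counter > 0, then 1 -> 0
//   LockdownSemaphore (&Counter);   // atomically reads the count and sets it
//                                   // to (UINT32)-1; afterwards a
//                                   // ReleaseSemaphore() call returns 0
//                                   // without incrementing (Value + 1 == 0),
//                                   // which is how late arrivals are rejected
//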
/**
  Waits for all APs to perform an atomic compare exchange operation
  releasing the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}
/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}
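
//
// Phase-barrier sketch (illustrative): WaitForAllAPs() and ReleaseAllAPs()
// together give the BSP a simple barrier over the per-CPU Run semaphores.
// Each AP posts to the BSP's Run semaphore when it finishes a step, so a
// BSP-side phase boundary looks like:
//
//   ReleaseAllAPs ();         // signal every present AP to start the step
//   ...                       // BSP performs its own share of the step
//   WaitForAllAPs (ApCount);  // block until every AP has checked back in
//
// BSPHandler() below sequences the MTRR save/program phases exactly this way.
//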
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}
/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when this
  function returns, no AP will execute normal mode code before entering SMM, except
  for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. SMI IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs are doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, a (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI-disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }
}
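
//
// Caller's view (illustrative sketch): in traditional sync mode the BSP calls
// this function and then freezes the arrival counter, so the AP count it
// observes stays stable for the remainder of the SMI:
//
//   SmmWaitForApArrival ();
//   *mSmmMpSyncData->AllCpusInSync = TRUE;
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
//
// See BSPHandler() below for the actual call site.
//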
/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}
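
//
// Illustrative sketch of the save/replace/restore bracket that the handlers
// below build around this function with MtrrLib:
//
//   MTRR_SETTINGS  Mtrrs;
//
//   MtrrGetAllMtrrs (&Mtrrs);       // back up the OS view of the MTRRs
//   ReplaceOSMtrrs (CpuIndex);      // switch to the SMI MTRR settings
//   ...                             // run SMI handlers
//   SmmCpuFeaturesReenableSmrr ();  // re-enable SMRR protection
//   MtrrSetAllMtrrs (&Mtrrs);       // restore the OS MTRRs
//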
/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to back up MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}
/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}
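
//
// Size sketch for the allocation above (illustrative): the base identity map
// costs 5 pages, one top-level page with only its first 4 entries populated
// plus 4 page directory pages. Each of the 4 * 512 PDEs maps a 2MB page, so
// the table covers 4 * 512 * 2MB = 4GB. With PcdCpuSmmStackGuard set, one
// extra 4KB page table page is needed for every 2MB region that overlaps the
// stack array, which is where PagesNeeded (one page per 2MB step from
// Low2MBoundary to High2MBoundary) comes from.
//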
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}
/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, TRUE);
}
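
//
// Usage sketch (hypothetical procedure and CPU index, for illustration only):
//
//   VOID
//   EFIAPI
//   ExampleApTask (
//     IN OUT VOID  *Buffer    // hypothetical per-call context
//     )
//   {
//     // Runs on the target AP inside SMM, with that AP's BUSY lock held.
//   }
//
//   Status = SmmBlockingStartupThisAp (ExampleApTask, 1, NULL);
//   // Returns only after AP 1 has finished ExampleApTask().
//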
/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}
/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}
/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize  = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
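
//
// Layout sketch of the semaphore block built above (illustrative).
// GetSpinLockProperties() returns the safe stride (typically one cache line),
// and every semaphore gets its own stride to avoid false sharing:
//
//   +---------------------------+ <- SemaphoreBlock
//   | Counter                   |  one stride each: Counter, InsideSmm,
//   | InsideSmm                 |  AllCpusInSync, PFLock,
//   | ...                       |  CodeAccessCheckLock, MemoryMappedLock
//   +---------------------------+ <- + GlobalSemaphoresSize
//   | Busy[0..N-1]              |  N = ProcessorCount, one stride per CPU
//   | Run[0..N-1]               |
//   | Present[0..N-1]           |
//   +---------------------------+ <- + CpuSemaphoresSize
//   | MSR spin locks            |  at least MSR_SPIN_LOCK_INIT_NUM strides
//   +---------------------------+
//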
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
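
//
// Buffer layout sketch for mSmmMpSyncData (allocated in
// InitializeMpServiceData() below), matching the pointer fix-ups above:
//
//   [SMM_DISPATCHER_MP_SYNC_DATA][SMM_CPU_DATA_BLOCK x NumberOfCpus][BOOLEAN x NumberOfCpus]
//    header                       ->CpuData                          ->CandidateBsp
//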
/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      (UINTN)gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
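
//
// Worked example for the gPhyMask computation above (illustrative): CPUID
// leaf 0x80000008 reports the physical address width in EAX[7:0]. On a CPU
// reporting 36 bits:
//
//   gPhyMask  = LShiftU64 (1, 36) - 1;          // 0xFFFFFFFFF
//   gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;   // clamp to 48 bits and
//                                               // clear the low 12 bits
//   // gPhyMask == 0xFFFFFF000: a page-aligned mask of valid address bits
//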
/**
  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}
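
//
// Usage sketch (illustrative, assuming IPL-side variable names): the PI SMM
// IPL locates EFI_SMM_CONFIGURATION_PROTOCOL and registers the SMM Foundation
// entry point through this service:
//
//   Status = SmmConfiguration->RegisterSmmEntry (
//                                SmmConfiguration,   // hypothetical protocol pointer
//                                SmmEntryPoint       // hypothetical EFI_SMM_ENTRY_POINT
//                                );
//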