2 SMM MP service implementation
4 Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
16 MTRR_SETTINGS gSmiMtrrs
;
18 SMM_DISPATCHER_MP_SYNC_DATA
*mSmmMpSyncData
= NULL
;
19 UINTN mSmmMpSyncDataSize
;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores
;
22 SPIN_LOCK
*mPFLock
= NULL
;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode
;
24 BOOLEAN mMachineCheckSupported
= FALSE
;
25 MM_COMPLETION mSmmStartupThisApToken
;
27 extern UINTN mSmmShadowStackSize
;
30 Performs an atomic compare exchange operation to get semaphore.
31 The compare exchange operation must be performed using
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
41 IN OUT
volatile UINT32
*Sem
49 (InterlockedCompareExchange32 (
65 Performs an atomic compare exchange operation to release semaphore.
66 The compare exchange operation must be performed using
69 @param Sem IN: 32-bit unsigned integer
70 OUT: original integer + 1
71 @return Original integer + 1
76 IN OUT
volatile UINT32
*Sem
83 } while (Value
+ 1 != 0 &&
84 InterlockedCompareExchange32 (
94 Performs an atomic compare exchange operation to lock semaphore.
95 The compare exchange operation must be performed using
98 @param Sem IN: 32-bit unsigned integer
100 @return Original integer
105 IN OUT
volatile UINT32
*Sem
112 } while (InterlockedCompareExchange32 (
122 Wait all APs to performs an atomic compare exchange operation to release semaphore.
124 @param NumberOfAPs AP number
134 BspIndex
= mSmmMpSyncData
->BspIndex
;
135 while (NumberOfAPs
-- > 0) {
136 WaitForSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
141 Performs an atomic compare exchange operation to release semaphore
152 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
153 if (IsPresentAp (Index
)) {
154 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[Index
].Run
);
160 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
162 @param Exceptions CPU Arrival exception flags.
164 @retval TRUE if all CPUs the have checked in.
165 @retval FALSE if at least one Normal AP hasn't checked in.
169 AllCpusInSmmWithExceptions (
170 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
174 SMM_CPU_DATA_BLOCK
*CpuData
;
175 EFI_PROCESSOR_INFORMATION
*ProcessorInfo
;
177 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
179 if (*mSmmMpSyncData
->Counter
== mNumberOfCpus
) {
183 CpuData
= mSmmMpSyncData
->CpuData
;
184 ProcessorInfo
= gSmmCpuPrivate
->ProcessorInfo
;
185 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
186 if (!(*(CpuData
[Index
].Present
)) && (ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
)) {
187 if (((Exceptions
& ARRIVAL_EXCEPTION_DELAYED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmDelayed
) != 0)) {
191 if (((Exceptions
& ARRIVAL_EXCEPTION_BLOCKED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmBlocked
) != 0)) {
195 if (((Exceptions
& ARRIVAL_EXCEPTION_SMI_DISABLED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmEnable
) != 0)) {
207 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
209 @retval TRUE Os enable lmce.
210 @retval FALSE Os not enable lmce.
218 MSR_IA32_MCG_CAP_REGISTER McgCap
;
219 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl
;
220 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl
;
222 McgCap
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_CAP
);
223 if (McgCap
.Bits
.MCG_LMCE_P
== 0) {
227 FeatureCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL
);
228 if (FeatureCtrl
.Bits
.LmceOn
== 0) {
232 McgExtCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL
);
233 return (BOOLEAN
)(McgExtCtrl
.Bits
.LMCE_EN
== 1);
237 Return if Local machine check exception signaled.
239 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
240 delivered to only the logical processor.
242 @retval TRUE LMCE was signaled.
243 @retval FALSE LMCE was not signaled.
251 MSR_IA32_MCG_STATUS_REGISTER McgStatus
;
253 McgStatus
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_STATUS
);
254 return (BOOLEAN
)(McgStatus
.Bits
.LMCE_S
== 1);
258 Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
259 entering SMM, except SMI disabled APs.
263 SmmWaitForApArrival (
272 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
276 if (mMachineCheckSupported
) {
277 LmceEn
= IsLmceOsEnabled ();
278 LmceSignal
= IsLmceSignaled ();
282 // Platform implementor should choose a timeout value appropriately:
283 // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
284 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
285 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
286 // and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
287 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
288 // SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
289 // - The timeout value must be longer than longest possible IO operation in the system
293 // Sync with APs 1st timeout
295 for (Timer
= StartSyncTimer ();
296 !IsSyncTimerTimeout (Timer
) && !(LmceEn
&& LmceSignal
) &&
297 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
304 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
306 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
307 // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
308 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
309 // work while SMI handling is on-going.
310 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
311 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
312 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
313 // mode work while SMI handling is on-going.
314 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
315 // - In traditional flow, SMI disabling is discouraged.
316 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
317 // In both cases, adding SMI-disabling checking code increases overhead.
319 if (*mSmmMpSyncData
->Counter
< mNumberOfCpus
) {
321 // Send SMI IPIs to bring outside processors in
323 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
324 if (!(*(mSmmMpSyncData
->CpuData
[Index
].Present
)) && (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
)) {
325 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
330 // Sync with APs 2nd timeout.
332 for (Timer
= StartSyncTimer ();
333 !IsSyncTimerTimeout (Timer
) &&
334 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
345 Replace OS MTRR's with SMI MTRR's.
347 @param CpuIndex Processor Index
355 SmmCpuFeaturesDisableSmrr ();
358 // Replace all MTRRs registers
360 MtrrSetAllMtrrs (&gSmiMtrrs
);
364 Wheck whether task has been finished by all APs.
366 @param BlockMode Whether did it in block mode or non-block mode.
368 @retval TRUE Task has been finished by all APs.
369 @retval FALSE Task not has been finished by all APs.
373 WaitForAllAPsNotBusy (
379 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
381 // Ignore BSP and APs which not call in SMM.
383 if (!IsPresentAp (Index
)) {
388 AcquireSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
389 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
391 if (AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[Index
].Busy
)) {
392 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
403 Check whether it is an present AP.
405 @param CpuIndex The AP index which calls this function.
407 @retval TRUE It's a present AP.
408 @retval TRUE This is not an AP or it is not present.
416 return ((CpuIndex
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) &&
417 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
));
421 Clean up the status flags used during executing the procedure.
423 @param CpuIndex The AP index which calls this function.
431 PROCEDURE_TOKEN
*Token
;
433 Token
= mSmmMpSyncData
->CpuData
[CpuIndex
].Token
;
435 if (InterlockedDecrement (&Token
->RunningApCount
) == 0) {
436 ReleaseSpinLock (Token
->SpinLock
);
439 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= NULL
;
443 Free the tokens in the maintained list.
452 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
454 gSmmCpuPrivate
->FirstFreeToken
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
460 @param CpuIndex BSP processor Index
461 @param SyncMode SMM MP sync mode
467 IN SMM_CPU_SYNC_MODE SyncMode
473 BOOLEAN ClearTopLevelSmiResult
;
476 ASSERT (CpuIndex
== mSmmMpSyncData
->BspIndex
);
480 // Flag BSP's presence
482 *mSmmMpSyncData
->InsideSmm
= TRUE
;
485 // Initialize Debug Agent to start source level debug in BSP handler
487 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI
, NULL
, NULL
);
490 // Mark this processor's presence
492 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
495 // Clear platform top level SMI status bit before calling SMI handlers. If
496 // we cleared it after SMI handlers are run, we would miss the SMI that
497 // occurs after SMI handlers are done and before SMI status bit is cleared.
499 ClearTopLevelSmiResult
= ClearTopLevelSmiStatus ();
500 ASSERT (ClearTopLevelSmiResult
== TRUE
);
503 // Set running processor index
505 gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
= CpuIndex
;
508 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
510 if ((SyncMode
== SmmCpuSyncModeTradition
) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
512 // Wait for APs to arrive
514 SmmWaitForApArrival ();
517 // Lock the counter down and retrieve the number of APs
519 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
520 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
523 // Wait for all APs to get ready for programming MTRRs
525 WaitForAllAPs (ApCount
);
527 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
529 // Signal all APs it's time for backup MTRRs
534 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
535 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
536 // to a large enough value to avoid this situation.
537 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
538 // We do the backup first and then set MTRR to avoid race condition for threads
541 MtrrGetAllMtrrs (&Mtrrs
);
544 // Wait for all APs to complete their MTRR saving
546 WaitForAllAPs (ApCount
);
549 // Let all processors program SMM MTRRs together
554 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
555 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
556 // to a large enough value to avoid this situation.
558 ReplaceOSMtrrs (CpuIndex
);
561 // Wait for all APs to complete their MTRR programming
563 WaitForAllAPs (ApCount
);
568 // The BUSY lock is initialized to Acquired state
570 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
573 // Perform the pre tasks
578 // Invoke SMM Foundation EntryPoint with the processor information context.
580 gSmmCpuPrivate
->SmmCoreEntry (&gSmmCpuPrivate
->SmmCoreEntryContext
);
583 // Make sure all APs have completed their pending none-block tasks
585 WaitForAllAPsNotBusy (TRUE
);
588 // Perform the remaining tasks
590 PerformRemainingTasks ();
593 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
594 // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
595 // will run through freely.
597 if ((SyncMode
!= SmmCpuSyncModeTradition
) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
599 // Lock the counter down and retrieve the number of APs
601 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
602 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
604 // Make sure all APs have their Present flag set
608 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
609 if (*(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
614 if (PresentCount
> ApCount
) {
621 // Notify all APs to exit
623 *mSmmMpSyncData
->InsideSmm
= FALSE
;
627 // Wait for all APs to complete their pending tasks
629 WaitForAllAPs (ApCount
);
631 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
633 // Signal APs to restore MTRRs
640 SmmCpuFeaturesReenableSmrr ();
641 MtrrSetAllMtrrs (&Mtrrs
);
644 // Wait for all APs to complete MTRR programming
646 WaitForAllAPs (ApCount
);
650 // Stop source level debug in BSP handler, the code below will not be
653 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI
, NULL
, NULL
);
656 // Signal APs to Reset states/semaphore for this processor
661 // Perform pending operations for hot-plug
666 // Clear the Present flag of BSP
668 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
671 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
672 // WaitForAllAps does not depend on the Present flag.
674 WaitForAllAPs (ApCount
);
677 // Reset the tokens buffer.
682 // Reset BspIndex to -1, meaning BSP has not been elected.
684 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
685 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
689 // Allow APs to check in from this point on
691 *mSmmMpSyncData
->Counter
= 0;
692 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
698 @param CpuIndex AP processor Index.
699 @param ValidSmi Indicates that current SMI is a valid SMI or not.
700 @param SyncMode SMM MP sync mode.
707 IN SMM_CPU_SYNC_MODE SyncMode
713 EFI_STATUS ProcedureStatus
;
718 for (Timer
= StartSyncTimer ();
719 !IsSyncTimerTimeout (Timer
) &&
720 !(*mSmmMpSyncData
->InsideSmm
);
726 if (!(*mSmmMpSyncData
->InsideSmm
)) {
728 // BSP timeout in the first round
730 if (mSmmMpSyncData
->BspIndex
!= -1) {
732 // BSP Index is known
734 BspIndex
= mSmmMpSyncData
->BspIndex
;
735 ASSERT (CpuIndex
!= BspIndex
);
738 // Send SMI IPI to bring BSP in
740 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[BspIndex
].ProcessorId
);
743 // Now clock BSP for the 2nd time
745 for (Timer
= StartSyncTimer ();
746 !IsSyncTimerTimeout (Timer
) &&
747 !(*mSmmMpSyncData
->InsideSmm
);
753 if (!(*mSmmMpSyncData
->InsideSmm
)) {
755 // Give up since BSP is unable to enter SMM
756 // and signal the completion of this AP
757 WaitForSemaphore (mSmmMpSyncData
->Counter
);
762 // Don't know BSP index. Give up without sending IPI to BSP.
764 WaitForSemaphore (mSmmMpSyncData
->Counter
);
772 BspIndex
= mSmmMpSyncData
->BspIndex
;
773 ASSERT (CpuIndex
!= BspIndex
);
776 // Mark this processor's presence
778 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
780 if ((SyncMode
== SmmCpuSyncModeTradition
) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
782 // Notify BSP of arrival at this point
784 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
787 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
789 // Wait for the signal from BSP to backup MTRRs
791 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
796 MtrrGetAllMtrrs (&Mtrrs
);
799 // Signal BSP the completion of this AP
801 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
804 // Wait for BSP's signal to program MTRRs
806 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
809 // Replace OS MTRRs with SMI MTRRs
811 ReplaceOSMtrrs (CpuIndex
);
814 // Signal BSP the completion of this AP
816 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
821 // Wait for something to happen
823 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
826 // Check if BSP wants to exit SMM
828 if (!(*mSmmMpSyncData
->InsideSmm
)) {
833 // BUSY should be acquired by SmmStartupThisAp()
836 !AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)
840 // Invoke the scheduled procedure
842 ProcedureStatus
= (*mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
)(
843 (VOID
*)mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
845 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
846 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= ProcedureStatus
;
849 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Token
!= NULL
) {
850 ReleaseToken (CpuIndex
);
856 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
859 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
861 // Notify BSP the readiness of this AP to program MTRRs
863 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
866 // Wait for the signal from BSP to program MTRRs
868 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
873 SmmCpuFeaturesReenableSmrr ();
874 MtrrSetAllMtrrs (&Mtrrs
);
878 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
880 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
883 // Wait for the signal from BSP to Reset states/semaphore for this processor
885 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
888 // Reset states/semaphore for this processor
890 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
893 // Notify BSP the readiness of this AP to exit SMM
895 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
899 Create 4G PageTable in SMRAM.
901 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
902 @return PageTable Address
907 IN BOOLEAN Is32BitPageTable
915 UINTN High2MBoundary
;
925 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
927 // Add one more page for known good stack, then find the lower 2MB aligned address.
929 Low2MBoundary
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
) & ~(SIZE_2MB
-1);
931 // Add two more pages for known good stack and stack guard page,
932 // then find the lower 2MB aligned address.
934 High2MBoundary
= (mSmmStackArrayEnd
- mSmmStackSize
- mSmmShadowStackSize
+ EFI_PAGE_SIZE
* 2) & ~(SIZE_2MB
-1);
935 PagesNeeded
= ((High2MBoundary
- Low2MBoundary
) / SIZE_2MB
) + 1;
939 // Allocate the page table
941 PageTable
= AllocatePageTableMemory (5 + PagesNeeded
);
942 ASSERT (PageTable
!= NULL
);
944 PageTable
= (VOID
*)((UINTN
)PageTable
);
945 Pte
= (UINT64
*)PageTable
;
948 // Zero out all page table entries first
950 ZeroMem (Pte
, EFI_PAGES_TO_SIZE (1));
953 // Set Page Directory Pointers
955 for (Index
= 0; Index
< 4; Index
++) {
956 Pte
[Index
] = ((UINTN
)PageTable
+ EFI_PAGE_SIZE
* (Index
+ 1)) | mAddressEncMask
|
957 (Is32BitPageTable
? IA32_PAE_PDPTE_ATTRIBUTE_BITS
: PAGE_ATTRIBUTE_BITS
);
960 Pte
+= EFI_PAGE_SIZE
/ sizeof (*Pte
);
963 // Fill in Page Directory Entries
965 for (Index
= 0; Index
< EFI_PAGE_SIZE
* 4 / sizeof (*Pte
); Index
++) {
966 Pte
[Index
] = (Index
<< 21) | mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
969 Pdpte
= (UINT64
*)PageTable
;
970 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
971 Pages
= (UINTN
)PageTable
+ EFI_PAGES_TO_SIZE (5);
972 GuardPage
= mSmmStackArrayBase
+ EFI_PAGE_SIZE
;
973 for (PageIndex
= Low2MBoundary
; PageIndex
<= High2MBoundary
; PageIndex
+= SIZE_2MB
) {
974 Pte
= (UINT64
*)(UINTN
)(Pdpte
[BitFieldRead32 ((UINT32
)PageIndex
, 30, 31)] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
975 Pte
[BitFieldRead32 ((UINT32
)PageIndex
, 21, 29)] = (UINT64
)Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
977 // Fill in Page Table Entries
979 Pte
= (UINT64
*)Pages
;
980 PageAddress
= PageIndex
;
981 for (Index
= 0; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
982 if (PageAddress
== GuardPage
) {
984 // Mark the guard page as non-present
986 Pte
[Index
] = PageAddress
| mAddressEncMask
;
987 GuardPage
+= (mSmmStackSize
+ mSmmShadowStackSize
);
988 if (GuardPage
> mSmmStackArrayEnd
) {
992 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
995 PageAddress
+= EFI_PAGE_SIZE
;
998 Pages
+= EFI_PAGE_SIZE
;
1002 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) {
1003 Pte
= (UINT64
*)(UINTN
)(Pdpte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
1004 if ((Pte
[0] & IA32_PG_PS
) == 0) {
1005 // 4K-page entries are already mapped. Just hide the first one anyway.
1006 Pte
= (UINT64
*)(UINTN
)(Pte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
1007 Pte
[0] &= ~(UINT64
)IA32_PG_P
; // Hide page 0
1009 // Create 4K-page entries
1010 Pages
= (UINTN
)AllocatePageTableMemory (1);
1011 ASSERT (Pages
!= 0);
1013 Pte
[0] = (UINT64
)(Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
1015 Pte
= (UINT64
*)Pages
;
1017 Pte
[0] = PageAddress
| mAddressEncMask
; // Hide page 0 but present left
1018 for (Index
= 1; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
1019 PageAddress
+= EFI_PAGE_SIZE
;
1020 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1025 return (UINT32
)(UINTN
)PageTable
;
1029 Checks whether the input token is the current used token.
1031 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1034 @retval TRUE The input token is the current used token.
1035 @retval FALSE The input token is not the current used token.
1043 PROCEDURE_TOKEN
*ProcToken
;
1045 if (Token
== NULL
) {
1049 Link
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
1051 // Only search used tokens.
1053 while (Link
!= gSmmCpuPrivate
->FirstFreeToken
) {
1054 ProcToken
= PROCEDURE_TOKEN_FROM_LINK (Link
);
1056 if (ProcToken
->SpinLock
== Token
) {
1060 Link
= GetNextNode (&gSmmCpuPrivate
->TokenList
, Link
);
1067 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1069 @return First token of the token buffer.
1072 AllocateTokenBuffer (
1077 UINT32 TokenCountPerChunk
;
1079 SPIN_LOCK
*SpinLock
;
1080 UINT8
*SpinLockBuffer
;
1081 PROCEDURE_TOKEN
*ProcTokens
;
1083 SpinLockSize
= GetSpinLockProperties ();
1085 TokenCountPerChunk
= FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk
);
1086 ASSERT (TokenCountPerChunk
!= 0);
1087 if (TokenCountPerChunk
== 0) {
1088 DEBUG ((DEBUG_ERROR
, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1092 DEBUG ((DEBUG_INFO
, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize
, TokenCountPerChunk
));
1095 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1097 SpinLockBuffer
= AllocatePool (SpinLockSize
* TokenCountPerChunk
);
1098 ASSERT (SpinLockBuffer
!= NULL
);
1100 ProcTokens
= AllocatePool (sizeof (PROCEDURE_TOKEN
) * TokenCountPerChunk
);
1101 ASSERT (ProcTokens
!= NULL
);
1103 for (Index
= 0; Index
< TokenCountPerChunk
; Index
++) {
1104 SpinLock
= (SPIN_LOCK
*)(SpinLockBuffer
+ SpinLockSize
* Index
);
1105 InitializeSpinLock (SpinLock
);
1107 ProcTokens
[Index
].Signature
= PROCEDURE_TOKEN_SIGNATURE
;
1108 ProcTokens
[Index
].SpinLock
= SpinLock
;
1109 ProcTokens
[Index
].RunningApCount
= 0;
1111 InsertTailList (&gSmmCpuPrivate
->TokenList
, &ProcTokens
[Index
].Link
);
1114 return &ProcTokens
[0].Link
;
1120 If no free token, allocate new tokens then return the free one.
1122 @param RunningApsCount The Running Aps count for this token.
1124 @retval return the first free PROCEDURE_TOKEN.
1129 IN UINT32 RunningApsCount
1132 PROCEDURE_TOKEN
*NewToken
;
1135 // If FirstFreeToken meets the end of token list, enlarge the token list.
1136 // Set FirstFreeToken to the first free token.
1138 if (gSmmCpuPrivate
->FirstFreeToken
== &gSmmCpuPrivate
->TokenList
) {
1139 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
1142 NewToken
= PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate
->FirstFreeToken
);
1143 gSmmCpuPrivate
->FirstFreeToken
= GetNextNode (&gSmmCpuPrivate
->TokenList
, gSmmCpuPrivate
->FirstFreeToken
);
1145 NewToken
->RunningApCount
= RunningApsCount
;
1146 AcquireSpinLock (NewToken
->SpinLock
);
1152 Checks status of specified AP.
1154 This function checks whether the specified AP has finished the task assigned
1155 by StartupThisAP(), and whether timeout expires.
1157 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1160 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1161 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1168 if (AcquireSpinLockOrFail (Token
)) {
1169 ReleaseSpinLock (Token
);
1173 return EFI_NOT_READY
;
1177 Schedule a procedure to run on the specified CPU.
1179 @param[in] Procedure The address of the procedure to run
1180 @param[in] CpuIndex Target CPU Index
1181 @param[in,out] ProcArguments The parameter to pass to the procedure
1182 @param[in] Token This is an optional parameter that allows the caller to execute the
1183 procedure in a blocking or non-blocking fashion. If it is NULL the
1184 call is blocking, and the call will not return until the AP has
1185 completed the procedure. If the token is not NULL, the call will
1186 return immediately. The caller can check whether the procedure has
1187 completed with CheckOnProcedure or WaitForProcedure.
1188 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1189 execution of Procedure, either for blocking or non-blocking mode.
1190 Zero means infinity. If the timeout expires before all APs return
1191 from Procedure, then Procedure on the failed APs is terminated. If
1192 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1193 If the timeout expires in non-blocking mode, the timeout determined
1194 can be through CheckOnProcedure or WaitForProcedure.
1195 Note that timeout support is optional. Whether an implementation
1196 supports this feature can be determined via the Attributes data
1198 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1199 by Procedure when it completes execution on the target AP, or with
1200 EFI_TIMEOUT if the Procedure fails to complete within the optional
1201 timeout. The implementation will update this variable with
1202 EFI_NOT_READY prior to starting Procedure on the target AP.
1204 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1205 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1206 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1207 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1208 @retval EFI_SUCCESS The procedure has been successfully scheduled
1212 InternalSmmStartupThisAp (
1213 IN EFI_AP_PROCEDURE2 Procedure
,
1215 IN OUT VOID
*ProcArguments OPTIONAL
,
1216 IN MM_COMPLETION
*Token
,
1217 IN UINTN TimeoutInMicroseconds
,
1218 IN OUT EFI_STATUS
*CpuStatus
1221 PROCEDURE_TOKEN
*ProcToken
;
1223 if (CpuIndex
>= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
) {
1224 DEBUG ((DEBUG_ERROR
, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex
, gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
));
1225 return EFI_INVALID_PARAMETER
;
1228 if (CpuIndex
== gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1229 DEBUG ((DEBUG_ERROR
, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex
));
1230 return EFI_INVALID_PARAMETER
;
1233 if (gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
== INVALID_APIC_ID
) {
1234 return EFI_INVALID_PARAMETER
;
1237 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
1238 if (mSmmMpSyncData
->EffectiveSyncMode
== SmmCpuSyncModeTradition
) {
1239 DEBUG ((DEBUG_ERROR
, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex
));
1242 return EFI_INVALID_PARAMETER
;
1245 if (gSmmCpuPrivate
->Operation
[CpuIndex
] == SmmCpuRemove
) {
1246 if (!FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1247 DEBUG ((DEBUG_ERROR
, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex
));
1250 return EFI_INVALID_PARAMETER
;
1253 if ((TimeoutInMicroseconds
!= 0) && ((mSmmMp
.Attributes
& EFI_MM_MP_TIMEOUT_SUPPORTED
) == 0)) {
1254 return EFI_INVALID_PARAMETER
;
1257 if (Procedure
== NULL
) {
1258 return EFI_INVALID_PARAMETER
;
1261 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1263 mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
= Procedure
;
1264 mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
= ProcArguments
;
1265 if (Token
!= NULL
) {
1266 if (Token
!= &mSmmStartupThisApToken
) {
1268 // When Token points to mSmmStartupThisApToken, this routine is called
1269 // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
1271 // In this case, caller wants to startup AP procedure in non-blocking
1272 // mode and cannot get the completion status from the Token because there
1273 // is no way to return the Token to caller from SmmStartupThisAp().
1274 // Caller needs to use its implementation specific way to query the completion status.
1276 // There is no need to allocate a token for such case so the 3 overheads
1278 // 1. Call AllocateTokenBuffer() when there is no free token.
1279 // 2. Get a free token from the token buffer.
1280 // 3. Call ReleaseToken() in APHandler().
1282 ProcToken
= GetFreeToken (1);
1283 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= ProcToken
;
1284 *Token
= (MM_COMPLETION
)ProcToken
->SpinLock
;
1288 mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= CpuStatus
;
1289 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
1290 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= EFI_NOT_READY
;
1293 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
1295 if (Token
== NULL
) {
1296 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1297 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
// NOTE(review): This chunk is a garbled extraction. Leading numbers (e.g. "1346")
// are original-file line numbers fused into the text, logical lines are split
// across physical lines, and several original lines are missing (the local
// declarations of Index/CpuCount, the `CpuCount = 0;` / `CpuCount++;` statements,
// the WakeUpAp/dispatch step, closing braces and the final return). Restore from
// the upstream file before attempting to compile; comments below document the
// visible logic only.
/**
1304 Worker function to execute a caller provided function on all enabled APs.
1306 @param[in] Procedure A pointer to the function to be run on
1307 enabled APs of the system.
1308 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1309 APs to return from Procedure, either for
1310 blocking or non-blocking mode.
1311 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1313 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1314 procedure in a blocking or non-blocking fashion. If it is NULL the
1315 call is blocking, and the call will not return until the AP has
1316 completed the procedure. If the token is not NULL, the call will
1317 return immediately. The caller can check whether the procedure has
1318 completed with CheckOnProcedure or WaitForProcedure.
1319 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1320 by Procedure when it completes execution on the target AP, or with
1321 EFI_TIMEOUT if the Procedure fails to complete within the optional
1322 timeout. The implementation will update this variable with
1323 EFI_NOT_READY prior to starting Procedure on the target AP.
1326 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1327 the timeout expired.
1328 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1330 @retval others Failed to Startup all APs.
**/
1334 InternalSmmStartupAllAPs (
1335 IN EFI_AP_PROCEDURE2 Procedure
,
1336 IN UINTN TimeoutInMicroseconds
,
1337 IN OUT VOID
*ProcedureArguments OPTIONAL
,
1338 IN OUT MM_COMPLETION
*Token
,
1339 IN OUT EFI_STATUS
*CPUStatus
1344 PROCEDURE_TOKEN
*ProcToken
;
// A non-zero timeout is only legal when the MP capability advertises
// EFI_MM_MP_TIMEOUT_SUPPORTED in mSmmMp.Attributes.
1346 if ((TimeoutInMicroseconds
!= 0) && ((mSmmMp
.Attributes
& EFI_MM_MP_TIMEOUT_SUPPORTED
) == 0)) {
1347 return EFI_INVALID_PARAMETER
;
1350 if (Procedure
== NULL
) {
1351 return EFI_INVALID_PARAMETER
;
// Pre-flight pass: before mutating any per-CPU state, verify every present AP
// is schedulable (not marked for hot-remove, and its Busy lock can be taken).
// The lock is released again immediately; it is re-acquired for real below.
1355 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1356 if (IsPresentAp (Index
)) {
1359 if (gSmmCpuPrivate
->Operation
[Index
] == SmmCpuRemove
) {
1360 return EFI_INVALID_PARAMETER
;
1363 if (!AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[Index
].Busy
)) {
1364 return EFI_NOT_READY
;
1367 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
// NOTE(review): CpuCount's initialization/increment lines are missing from this
// extraction; the check below implies the loop above counted present APs.
1371 if (CpuCount
== 0) {
1372 return EFI_NOT_STARTED
;
// Non-blocking mode: hand the caller a completion token whose SpinLock is
// released when the last AP finishes (RunningApCount reaches zero).
1375 if (Token
!= NULL
) {
1376 ProcToken
= GetFreeToken ((UINT32
)mMaxNumberOfCpus
);
1377 *Token
= (MM_COMPLETION
)ProcToken
->SpinLock
;
1383 // Make sure all BUSY should be acquired.
1385 // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
1386 // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
1389 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1390 if (IsPresentAp (Index
)) {
1391 AcquireSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
// Program each present AP's mailbox (Procedure/Parameter/Token/Status) while
// its Busy lock is held, so no AP can observe a half-written mailbox.
1395 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1396 if (IsPresentAp (Index
)) {
1397 mSmmMpSyncData
->CpuData
[Index
].Procedure
= (EFI_AP_PROCEDURE2
)Procedure
;
1398 mSmmMpSyncData
->CpuData
[Index
].Parameter
= ProcedureArguments
;
1399 if (ProcToken
!= NULL
) {
1400 mSmmMpSyncData
->CpuData
[Index
].Token
= ProcToken
;
1403 if (CPUStatus
!= NULL
) {
1404 mSmmMpSyncData
->CpuData
[Index
].Status
= &CPUStatus
[Index
];
1405 if (mSmmMpSyncData
->CpuData
[Index
].Status
!= NULL
) {
1406 *mSmmMpSyncData
->CpuData
[Index
].Status
= EFI_NOT_READY
;
1411 // PI spec requirement:
1412 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
1414 if (CPUStatus
!= NULL
) {
1415 CPUStatus
[Index
] = EFI_NOT_STARTED
;
1419 // Decrease the count to mark this processor(AP or BSP) as finished.
1421 if (ProcToken
!= NULL
) {
1422 WaitForSemaphore (&ProcToken
->RunningApCount
);
// Blocking mode (Token == NULL): do not return until every AP has released
// its Busy lock, i.e. finished running Procedure.
1429 if (Token
== NULL
) {
1431 // Make sure all APs have completed their tasks.
1433 WaitForAllAPsNotBusy (TRUE
);
// NOTE(review): garbled extraction — the function signature (presumably
// `EFI_STATUS EFIAPI ProcedureWrapper (IN VOID *Buffer)`) and the
// `Wrapper = Buffer;` assignment are missing from this chunk; confirm against
// the upstream file.
/**
1440 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1441 If the function is defined with a type that is not compatible with
1442 the type (of the expression) pointed to by the expression that
1443 denotes the called function, the behavior is undefined.
1445 So add below wrapper function to convert between EFI_AP_PROCEDURE
1446 and EFI_AP_PROCEDURE2.
1448 Wrapper for Procedures.
1450 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
**/
1459 PROCEDURE_WRAPPER
*Wrapper
;
// Invoke the stored EFI_AP_PROCEDURE with its captured argument; this adapter
// lets an EFI_AP_PROCEDURE be dispatched through an EFI_AP_PROCEDURE2-typed
// call site without undefined behavior from a function-pointer cast.
1462 Wrapper
->Procedure (Wrapper
->ProcedureArgument
);
// NOTE(review): garbled extraction — the CpuIndex parameter line and the
// braces/return-type lines are missing from this chunk.
/**
1468 Schedule a procedure to run on the specified CPU in blocking mode.
1470 @param[in] Procedure The address of the procedure to run
1471 @param[in] CpuIndex Target CPU Index
1472 @param[in, out] ProcArguments The parameter to pass to the procedure
1474 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1475 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1476 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1477 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1478 @retval EFI_SUCCESS The procedure has been successfully scheduled
**/
1483 SmmBlockingStartupThisAp (
1484 IN EFI_AP_PROCEDURE Procedure
,
1486 IN OUT VOID
*ProcArguments OPTIONAL
// Stack-allocated wrapper is safe here: the call below is blocking (Token ==
// NULL, no timeout), so the wrapper outlives the AP's use of it.
1489 PROCEDURE_WRAPPER Wrapper
;
1491 Wrapper
.Procedure
= Procedure
;
1492 Wrapper
.ProcedureArgument
= ProcArguments
;
1495 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1497 return InternalSmmStartupThisAp (ProcedureWrapper
, CpuIndex
, &Wrapper
, NULL
, 0, NULL
);
// NOTE(review): garbled extraction — the function-name line (SmmStartupThisAp),
// the CpuIndex parameter, the ProcedureWrapper/CpuIndex arguments of the call
// below and the closing lines are missing from this chunk.
/**
1501 Schedule a procedure to run on the specified CPU.
1503 @param Procedure The address of the procedure to run
1504 @param CpuIndex Target CPU Index
1505 @param ProcArguments The parameter to pass to the procedure
1507 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1508 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1509 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1510 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1511 @retval EFI_SUCCESS The procedure has been successfully scheduled
**/
1517 IN EFI_AP_PROCEDURE Procedure
,
1519 IN OUT VOID
*ProcArguments OPTIONAL
// Unlike the blocking variant, the wrapper must persist after this function
// returns, so it lives in the pre-allocated per-CPU ApWrapperFunc array
// (see InitializeDataForMmMp) rather than on the stack.
1522 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].Procedure
= Procedure
;
1523 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].ProcedureArgument
= ProcArguments
;
1526 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1528 return InternalSmmStartupThisAp (
1531 &gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
],
// PcdCpuSmmBlockStartupThisAp selects blocking (NULL token) vs. fire-and-forget
// (shared module-level token) behavior for this legacy API.
1532 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp
) ? NULL
: &mSmmStartupThisApToken
,
// NOTE(review): garbled extraction — function-name line (CpuSmmDebugEntry),
// braces and the `else` between the 32-bit and 64-bit branches are missing.
/**
1539 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1540 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1542 NOTE: It might not be appreciated in runtime since it might
1543 conflict with OS debugging facilities. Turn them off in RELEASE.
1545 @param CpuIndex CPU Index
**/
1554 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
// Entire body is gated on PcdCpuSmmDebug, so RELEASE builds with the PCD off
// leave the debug registers untouched.
1556 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1557 ASSERT (CpuIndex
< mMaxNumberOfCpus
);
1558 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
// Pick the save-state layout (x86 vs x64 fields) based on the saved LMA mode,
// then load DR6/DR7 from the interrupted context's save state.
1559 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1560 AsmWriteDr6 (CpuSaveState
->x86
._DR6
);
1561 AsmWriteDr7 (CpuSaveState
->x86
._DR7
);
1563 AsmWriteDr6 ((UINTN
)CpuSaveState
->x64
._DR6
);
1564 AsmWriteDr7 ((UINTN
)CpuSaveState
->x64
._DR7
);
// NOTE(review): garbled extraction — function-name line (CpuSmmDebugExit),
// braces and the `else` between the 32-bit and 64-bit branches are missing.
/**
1570 This function restores DR6 & DR7 to SMM save state.
1572 NOTE: It might not be appreciated in runtime since it might
1573 conflict with OS debugging facilities. Turn them off in RELEASE.
1575 @param CpuIndex CPU Index
**/
1584 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
// Mirror of CpuSmmDebugEntry: on SMM exit, write the current DR6/DR7 values
// back into the save state so the RSM-restored context sees them.
1586 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1587 ASSERT (CpuIndex
< mMaxNumberOfCpus
);
1588 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1589 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
// 32-bit save state stores DR6/DR7 as UINT32, hence the narrowing casts.
1590 CpuSaveState
->x86
._DR7
= (UINT32
)AsmReadDr7 ();
1591 CpuSaveState
->x86
._DR6
= (UINT32
)AsmReadDr6 ();
1593 CpuSaveState
->x64
._DR7
= AsmReadDr7 ();
1594 CpuSaveState
->x64
._DR6
= AsmReadDr6 ();
// NOTE(review): garbled extraction — function-name line (SmiRendezvous), local
// declarations (Cr2, ValidSmi, IsBsp, Index, Status), the Cr2 save/restore
// calls, `goto Exit` statements, CPU pause hints inside the wait loops, the
// default-election InterlockedCompareExchange32 arguments, and closing braces
// are missing from this chunk. Comments document only the visible control flow.
/**
1600 C function for SMI entry, each processor comes here upon SMI trigger.
1602 @param CpuIndex CPU Index
**/
1614 BOOLEAN BspInProgress
;
1618 ASSERT (CpuIndex
< mMaxNumberOfCpus
);
1621 // Save Cr2 because Page Fault exception in SMM may override its value,
1622 // when using on-demand paging for above 4G memory.
1628 // Call the user register Startup function first.
1630 if (mSmmMpSyncData
->StartupProcedure
!= NULL
) {
1631 mSmmMpSyncData
->StartupProcedure (mSmmMpSyncData
->StartupProcArgs
);
1635 // Perform CPU specific entry hooks
1637 SmmCpuFeaturesRendezvousEntry (CpuIndex
);
1640 // Determine if this is a valid SMI
1642 ValidSmi
= PlatformValidSmi ();
1645 // Determine if BSP has been already in progress. Note this must be checked after
1646 // ValidSmi because BSP may clear a valid SMI source after checking in.
1648 BspInProgress
= *mSmmMpSyncData
->InsideSmm
;
1650 if (!BspInProgress
&& !ValidSmi
) {
1652 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1653 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1654 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1655 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1656 // is nothing we need to do.
1661 // Signal presence of this processor
// Counter is the rendezvous head-count; ReleaseSemaphore increments it. A zero
// return means the BSP already locked the counter to MAX (sync ended) — this
// late CPU must not join, only wait for AllCpusInSync to drop.
1663 if (ReleaseSemaphore (mSmmMpSyncData
->Counter
) == 0) {
1665 // BSP has already ended the synchronization, so QUIT!!!
1669 // Wait for BSP's signal to finish SMI
1671 while (*mSmmMpSyncData
->AllCpusInSync
) {
1678 // The BUSY lock is initialized to Released state.
1679 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1680 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1681 // after AP's present flag is detected.
1683 InitializeSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1686 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1687 ActivateSmmProfile (CpuIndex
);
1690 if (BspInProgress
) {
1692 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1693 // as BSP may have cleared the SMI status
1695 APHandler (CpuIndex
, ValidSmi
, mSmmMpSyncData
->EffectiveSyncMode
);
1698 // We have a valid SMI
// BSP election: platform hook first; if it decides, the winner stores its own
// index, otherwise the default race (compare-exchange on BspIndex, initialized
// to (UINT32)-1 in InitializeMpSyncData) picks the first CPU to arrive.
1705 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
1706 if (!mSmmMpSyncData
->SwitchBsp
|| mSmmMpSyncData
->CandidateBsp
[CpuIndex
]) {
1708 // Call platform hook to do BSP election
1710 Status
= PlatformSmmBspElection (&IsBsp
);
1711 if (EFI_SUCCESS
== Status
) {
1713 // Platform hook determines successfully
1716 mSmmMpSyncData
->BspIndex
= (UINT32
)CpuIndex
;
1720 // Platform hook fails to determine, use default BSP election method
1722 InterlockedCompareExchange32 (
1723 (UINT32
*)&mSmmMpSyncData
->BspIndex
,
1732 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1734 if (mSmmMpSyncData
->BspIndex
== CpuIndex
) {
1736 // Clear last request for SwitchBsp.
1738 if (mSmmMpSyncData
->SwitchBsp
) {
1739 mSmmMpSyncData
->SwitchBsp
= FALSE
;
1740 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1741 mSmmMpSyncData
->CandidateBsp
[Index
] = FALSE
;
1745 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1746 SmmProfileRecordSmiNum ();
1750 // BSP Handler is always called with a ValidSmi == TRUE
1752 BSPHandler (CpuIndex
, mSmmMpSyncData
->EffectiveSyncMode
);
1754 APHandler (CpuIndex
, ValidSmi
, mSmmMpSyncData
->EffectiveSyncMode
);
// On return from the handlers this CPU's Run semaphore must be drained.
1758 ASSERT (*mSmmMpSyncData
->CpuData
[CpuIndex
].Run
== 0);
1761 // Wait for BSP's signal to exit SMI
1763 while (*mSmmMpSyncData
->AllCpusInSync
) {
1769 SmmCpuFeaturesRendezvousExit (CpuIndex
);
// NOTE(review): garbled extraction — return type, VOID parameter list and
// braces are missing from this chunk.
/**
1778 Allocate buffer for SpinLock and Wrapper function buffer.
**/
1782 InitializeDataForMmMp (
// One PROCEDURE_WRAPPER slot per CPU, used by SmmStartupThisAp to keep the
// EFI_AP_PROCEDURE->EFI_AP_PROCEDURE2 adapter alive across a non-blocking call.
1786 gSmmCpuPrivate
->ApWrapperFunc
= AllocatePool (sizeof (PROCEDURE_WRAPPER
) * gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
);
1787 ASSERT (gSmmCpuPrivate
->ApWrapperFunc
!= NULL
);
// Token list plus an initial pre-allocated token pool for non-blocking
// procedure dispatch (see GetFreeToken users).
1789 InitializeListHead (&gSmmCpuPrivate
->TokenList
);
1791 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
// NOTE(review): garbled extraction — return type, parameter list, the
// TotalSize/Pages declarations and closing braces are missing from this chunk.
/**
1795 Allocate buffer for all semaphores and spin locks.
**/
1799 InitializeSmmCpuSemaphores (
1803 UINTN ProcessorCount
;
1805 UINTN GlobalSemaphoresSize
;
1806 UINTN CpuSemaphoresSize
;
1807 UINTN SemaphoreSize
;
1809 UINTN
*SemaphoreBlock
;
1810 UINTN SemaphoreAddr
;
// Every semaphore/lock gets its own SemaphoreSize-d slot; SemaphoreSize comes
// from GetSpinLockProperties() — presumably the cache-line-aligned spin-lock
// size, so distinct semaphores never share a line (confirm in BaseSynchronizationLib).
1812 SemaphoreSize
= GetSpinLockProperties ();
1813 ProcessorCount
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
// Sizing trick: sizeof(struct)/sizeof(VOID*) counts the pointer-sized fields
// in each semaphore struct, i.e. the number of slots needed.
1814 GlobalSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_GLOBAL
) / sizeof (VOID
*)) * SemaphoreSize
;
1815 CpuSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_CPU
) / sizeof (VOID
*)) * ProcessorCount
* SemaphoreSize
;
1816 TotalSize
= GlobalSemaphoresSize
+ CpuSemaphoresSize
;
1817 DEBUG ((DEBUG_INFO
, "One Semaphore Size = 0x%x\n", SemaphoreSize
));
1818 DEBUG ((DEBUG_INFO
, "Total Semaphores Size = 0x%x\n", TotalSize
));
1819 Pages
= EFI_SIZE_TO_PAGES (TotalSize
);
1820 SemaphoreBlock
= AllocatePages (Pages
);
1821 ASSERT (SemaphoreBlock
!= NULL
);
1822 ZeroMem (SemaphoreBlock
, TotalSize
);
// Carve the global semaphores out of the front of the block, one slot each.
1824 SemaphoreAddr
= (UINTN
)SemaphoreBlock
;
1825 mSmmCpuSemaphores
.SemaphoreGlobal
.Counter
= (UINT32
*)SemaphoreAddr
;
1826 SemaphoreAddr
+= SemaphoreSize
;
1827 mSmmCpuSemaphores
.SemaphoreGlobal
.InsideSmm
= (BOOLEAN
*)SemaphoreAddr
;
1828 SemaphoreAddr
+= SemaphoreSize
;
1829 mSmmCpuSemaphores
.SemaphoreGlobal
.AllCpusInSync
= (BOOLEAN
*)SemaphoreAddr
;
1830 SemaphoreAddr
+= SemaphoreSize
;
1831 mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
= (SPIN_LOCK
*)SemaphoreAddr
;
1832 SemaphoreAddr
+= SemaphoreSize
;
1833 mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
1834 = (SPIN_LOCK
*)SemaphoreAddr
;
1835 SemaphoreAddr
+= SemaphoreSize
;
// Per-CPU semaphores follow the globals; each field is an array of
// ProcessorCount slots (Busy locks, Run counters, Present flags).
1837 SemaphoreAddr
= (UINTN
)SemaphoreBlock
+ GlobalSemaphoresSize
;
1838 mSmmCpuSemaphores
.SemaphoreCpu
.Busy
= (SPIN_LOCK
*)SemaphoreAddr
;
1839 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1840 mSmmCpuSemaphores
.SemaphoreCpu
.Run
= (UINT32
*)SemaphoreAddr
;
1841 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1842 mSmmCpuSemaphores
.SemaphoreCpu
.Present
= (BOOLEAN
*)SemaphoreAddr
;
// Publish frequently used locks through their module-level aliases.
1844 mPFLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
;
1845 mConfigSmmCodeAccessCheckLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
;
1847 mSemaphoreSize
= SemaphoreSize
;
// NOTE(review): garbled extraction — return type, parameter list, the CpuIndex
// declaration, the `if (` / `)` lines around the three-pointer NULL check, and
// closing braces are missing from this chunk.
/**
1851 Initialize un-cacheable data.
**/
1856 InitializeMpSyncData (
1862 if (mSmmMpSyncData
!= NULL
) {
1864 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1865 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1867 ZeroMem (mSmmMpSyncData
, mSmmMpSyncDataSize
);
// CpuData and CandidateBsp are laid out immediately after the header struct
// inside the single mSmmMpSyncData allocation (see InitializeMpServiceData).
1868 mSmmMpSyncData
->CpuData
= (SMM_CPU_DATA_BLOCK
*)((UINT8
*)mSmmMpSyncData
+ sizeof (SMM_DISPATCHER_MP_SYNC_DATA
));
1869 mSmmMpSyncData
->CandidateBsp
= (BOOLEAN
*)(mSmmMpSyncData
->CpuData
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
);
1870 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
1872 // Enable BSP election by setting BspIndex to -1
1874 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
1877 mSmmMpSyncData
->EffectiveSyncMode
= mCpuSmmSyncMode
;
// Wire the global sync pointers to the uncacheable slots carved out by
// InitializeSmmCpuSemaphores, then reset them to the idle state.
1879 mSmmMpSyncData
->Counter
= mSmmCpuSemaphores
.SemaphoreGlobal
.Counter
;
1880 mSmmMpSyncData
->InsideSmm
= mSmmCpuSemaphores
.SemaphoreGlobal
.InsideSmm
;
1881 mSmmMpSyncData
->AllCpusInSync
= mSmmCpuSemaphores
.SemaphoreGlobal
.AllCpusInSync
;
1883 mSmmMpSyncData
->Counter
!= NULL
&& mSmmMpSyncData
->InsideSmm
!= NULL
&&
1884 mSmmMpSyncData
->AllCpusInSync
!= NULL
1886 *mSmmMpSyncData
->Counter
= 0;
1887 *mSmmMpSyncData
->InsideSmm
= FALSE
;
1888 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
// Per-CPU slots are strided by mSemaphoreSize within each semaphore array.
1890 for (CpuIndex
= 0; CpuIndex
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; CpuIndex
++) {
1891 mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
=
1892 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Busy
+ mSemaphoreSize
* CpuIndex
);
1893 mSmmMpSyncData
->CpuData
[CpuIndex
].Run
=
1894 (UINT32
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Run
+ mSemaphoreSize
* CpuIndex
);
1895 mSmmMpSyncData
->CpuData
[CpuIndex
].Present
=
1896 (BOOLEAN
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Present
+ mSemaphoreSize
* CpuIndex
);
1897 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
) = 0;
1898 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Run
) = 0;
1899 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
// NOTE(review): garbled extraction — the Stacks/StackSize parameter lines, the
// Cr3/Index/Pages local declarations, the InstallSmiHandler call line and some
// of its arguments (entry point, stack size, IDT base), and closing braces are
// missing from this chunk.
/**
1905 Initialize global data for MP synchronization.
1907 @param Stacks Base address of SMI stack buffer for all processors.
1908 @param StackSize Stack size for each processor in SMM.
1909 @param ShadowStackSize Shadow Stack size for each processor in SMM.
**/
1913 InitializeMpServiceData (
1916 IN UINTN ShadowStackSize
1921 UINT8
*GdtTssTables
;
1922 UINTN GdtTableStepSize
;
1923 CPUID_VERSION_INFO_EDX RegEdx
;
1924 UINT32 MaxExtendedFunction
;
1925 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize
;
1928 // Determine if this CPU supports machine check
1930 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
.Uint32
);
1931 mMachineCheckSupported
= (BOOLEAN
)(RegEdx
.Bits
.MCA
== 1);
1934 // Allocate memory for all locks and semaphores
1936 InitializeSmmCpuSemaphores ();
1939 // Initialize mSmmMpSyncData
// Single allocation holds the header plus per-CPU CpuData and CandidateBsp
// arrays; InitializeMpSyncData carves it up with matching pointer arithmetic.
1941 mSmmMpSyncDataSize
= sizeof (SMM_DISPATCHER_MP_SYNC_DATA
) +
1942 (sizeof (SMM_CPU_DATA_BLOCK
) + sizeof (BOOLEAN
)) * gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
1943 mSmmMpSyncData
= (SMM_DISPATCHER_MP_SYNC_DATA
*)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize
));
1944 ASSERT (mSmmMpSyncData
!= NULL
);
1945 mCpuSmmSyncMode
= (SMM_CPU_SYNC_MODE
)PcdGet8 (PcdCpuSmmSyncMode
);
1946 InitializeMpSyncData ();
1949 // Initialize physical address mask
1950 // NOTE: Physical memory above virtual address limit is not supported !!!
// Query CPUID leaf 0x80000008 for the physical address width; fall back to the
// architectural minimum of 36 bits when the leaf is unavailable.
1952 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &MaxExtendedFunction
, NULL
, NULL
, NULL
);
1953 if (MaxExtendedFunction
>= CPUID_VIR_PHY_ADDRESS_SIZE
) {
1954 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE
, &VirPhyAddressSize
.Uint32
, NULL
, NULL
, NULL
);
1956 VirPhyAddressSize
.Bits
.PhysicalAddressBits
= 36;
1959 gPhyMask
= LShiftU64 (1, VirPhyAddressSize
.Bits
.PhysicalAddressBits
) - 1;
1961 // Clear the low 12 bits
1963 gPhyMask
&= 0xfffffffffffff000ULL
;
1966 // Create page tables
1968 Cr3
= SmmInitPageTable ();
1970 GdtTssTables
= InitGdt (Cr3
, &GdtTableStepSize
);
1973 // Install SMI handler for each CPU
// Each CPU gets its own SMBASE, its own stack carved from the shared Stacks
// buffer (stride = StackSize + ShadowStackSize), and its own GDT/TSS copy.
1975 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1978 (UINT32
)mCpuHotPlugData
.SmBase
[Index
],
1979 (VOID
*)((UINTN
)Stacks
+ (StackSize
+ ShadowStackSize
) * Index
),
1981 (UINTN
)(GdtTssTables
+ GdtTableStepSize
* Index
),
1982 gcSmiGdtr
.Limit
+ 1,
1984 gcSmiIdtr
.Limit
+ 1,
1990 // Record current MTRR settings
1992 ZeroMem (&gSmiMtrrs
, sizeof (gSmiMtrrs
));
1993 MtrrGetAllMtrrs (&gSmiMtrrs
);
// NOTE(review): garbled extraction — return type, function-name line
// (RegisterSmmEntry), braces and the `return EFI_SUCCESS;` are missing.
/**
2000 Register the SMM Foundation entry point.
2002 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2003 @param SmmEntryPoint SMM Foundation EntryPoint
2005 @retval EFI_SUCCESS Successfully to register SMM foundation entry point
**/
2011 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL
*This
,
2012 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2016 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2018 gSmmCpuPrivate
->SmmCoreEntry
= SmmEntryPoint
;
// NOTE(review): garbled extraction — return type line, braces and the final
// `return EFI_SUCCESS;` are missing from this chunk. (The doc title below,
// "Register the SMM Foundation entry point.", looks copy-pasted from
// RegisterSmmEntry; this function actually registers the per-SMI startup
// procedure run by SmiRendezvous — confirm against upstream.)
/**
2024 Register the SMM Foundation entry point.
2026 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2027 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2028 with the related definitions of
2029 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2030 If caller may pass a value of NULL to deregister any existing
2032 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2033 run by the AP. It is an optional common mailbox between APs and
2034 the caller to share information
2036 @retval EFI_SUCCESS The Procedure has been set successfully.
2037 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
**/
2041 RegisterStartupProcedure (
2042 IN EFI_AP_PROCEDURE Procedure
,
2043 IN OUT VOID
*ProcedureArguments OPTIONAL
// Arguments without a procedure make no sense; NULL/NULL deregisters.
2046 if ((Procedure
== NULL
) && (ProcedureArguments
!= NULL
)) {
2047 return EFI_INVALID_PARAMETER
;
// Sync data is allocated in InitializeMpServiceData; reject early callers.
2050 if (mSmmMpSyncData
== NULL
) {
2051 return EFI_NOT_READY
;
// Consumed at the top of SmiRendezvous before CPU-specific entry hooks run.
2054 mSmmMpSyncData
->StartupProcedure
= Procedure
;
2055 mSmmMpSyncData
->StartupProcArgs
= ProcedureArguments
;