2 SMM MP service implementation
4 Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
// MTRR settings to be used while in SMM; installed via MtrrSetAllMtrrs()
// by ReplaceOSMtrrs() when OS MTRRs are swapped out.
16 MTRR_SETTINGS gSmiMtrrs
;
// MP synchronization data shared by the BSP and APs (arrival counter,
// per-CPU Present/Run/Busy state, InsideSmm flag). Allocated elsewhere;
// reset at the end of each SMI run.
18 SMM_DISPATCHER_MP_SYNC_DATA
*mSmmMpSyncData
= NULL
;
// Size in bytes of the buffer backing mSmmMpSyncData.
19 UINTN mSmmMpSyncDataSize
;
// Semaphore block used for CPU rendezvous in SMM — presumably the backing
// storage for the Counter/Run semaphores referenced above; allocation not
// visible in this chunk — TODO confirm.
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores
;
// NOTE(review): looks like the lock serializing SMM page-fault handling;
// no use is visible in this chunk — confirm against the #PF handler.
22 SPIN_LOCK
*mPFLock
= NULL
;
// Configured CPU sync mode (traditional vs. relaxed-AP); the handlers in
// this file take SyncMode as a parameter instead of reading this directly.
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode
;
// TRUE when machine-check (LMCE) support was detected; read by
// SmmWaitForApArrival() to decide whether to query LMCE enable/signal state.
24 BOOLEAN mMachineCheckSupported
= FALSE
;
27 Performs an atomic compare exchange operation to get semaphore.
28 The compare exchange operation must be performed using
31 @param Sem IN: 32-bit unsigned integer
32 OUT: original integer - 1
33 @return Original integer - 1
38 IN OUT
volatile UINT32
*Sem
46 InterlockedCompareExchange32 (
60 Performs an atomic compare exchange operation to release semaphore.
61 The compare exchange operation must be performed using
64 @param Sem IN: 32-bit unsigned integer
65 OUT: original integer + 1
66 @return Original integer + 1
71 IN OUT
volatile UINT32
*Sem
78 } while (Value
+ 1 != 0 &&
79 InterlockedCompareExchange32 (
88 Performs an atomic compare exchange operation to lock semaphore.
89 The compare exchange operation must be performed using
92 @param Sem IN: 32-bit unsigned integer
94 @return Original integer
99 IN OUT
volatile UINT32
*Sem
106 } while (InterlockedCompareExchange32 (
114 Waits for all APs to perform an atomic compare exchange operation to release the semaphore.
116 @param NumberOfAPs AP number
126 BspIndex
= mSmmMpSyncData
->BspIndex
;
127 while (NumberOfAPs
-- > 0) {
128 WaitForSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
133 Performs an atomic compare exchange operation to release semaphore
144 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
145 if (IsPresentAp (Index
)) {
146 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[Index
].Run
);
152 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
154 @param Exceptions CPU Arrival exception flags.
156 @retval TRUE if all the CPUs have checked in.
157 @retval FALSE if at least one Normal AP hasn't checked in.
161 AllCpusInSmmWithExceptions (
162 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
166 SMM_CPU_DATA_BLOCK
*CpuData
;
167 EFI_PROCESSOR_INFORMATION
*ProcessorInfo
;
169 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
171 if (*mSmmMpSyncData
->Counter
== mNumberOfCpus
) {
175 CpuData
= mSmmMpSyncData
->CpuData
;
176 ProcessorInfo
= gSmmCpuPrivate
->ProcessorInfo
;
177 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
178 if (!(*(CpuData
[Index
].Present
)) && ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
179 if (((Exceptions
& ARRIVAL_EXCEPTION_DELAYED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmDelayed
) != 0) {
182 if (((Exceptions
& ARRIVAL_EXCEPTION_BLOCKED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmBlocked
) != 0) {
185 if (((Exceptions
& ARRIVAL_EXCEPTION_SMI_DISABLED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmEnable
) != 0) {
197 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
199 @retval TRUE The OS has enabled LMCE.
200 @retval FALSE The OS has not enabled LMCE.
208 MSR_IA32_MCG_CAP_REGISTER McgCap
;
209 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl
;
210 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl
;
212 McgCap
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_CAP
);
213 if (McgCap
.Bits
.MCG_LMCE_P
== 0) {
217 FeatureCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL
);
218 if (FeatureCtrl
.Bits
.LmceOn
== 0) {
222 McgExtCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL
);
223 return (BOOLEAN
) (McgExtCtrl
.Bits
.LMCE_EN
== 1);
227 Return if Local machine check exception signaled.
229 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
230 delivered to only the logical processor.
232 @retval TRUE LMCE was signaled.
233 @retval FALSE LMCE was not signaled.
241 MSR_IA32_MCG_STATUS_REGISTER McgStatus
;
243 McgStatus
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_STATUS
);
244 return (BOOLEAN
) (McgStatus
.Bits
.LMCE_S
== 1);
248 Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
249 entering SMM, except SMI disabled APs.
253 SmmWaitForApArrival (
262 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
266 if (mMachineCheckSupported
) {
267 LmceEn
= IsLmceOsEnabled ();
268 LmceSignal
= IsLmceSignaled();
272 // Platform implementor should choose a timeout value appropriately:
273 // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
274 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
275 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
276 // and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
277 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
278 // SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
279 // - The timeout value must be longer than longest possible IO operation in the system
283 // Sync with APs 1st timeout
285 for (Timer
= StartSyncTimer ();
286 !IsSyncTimerTimeout (Timer
) && !(LmceEn
&& LmceSignal
) &&
287 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
293 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
295 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
296 // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
297 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
298 // work while SMI handling is on-going.
299 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
300 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
301 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
302 // mode work while SMI handling is on-going.
303 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
304 // - In traditional flow, SMI disabling is discouraged.
305 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
306 // In both cases, adding SMI-disabling checking code increases overhead.
308 if (*mSmmMpSyncData
->Counter
< mNumberOfCpus
) {
310 // Send SMI IPIs to bring outside processors in
312 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
313 if (!(*(mSmmMpSyncData
->CpuData
[Index
].Present
)) && gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
314 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
319 // Sync with APs 2nd timeout.
321 for (Timer
= StartSyncTimer ();
322 !IsSyncTimerTimeout (Timer
) &&
323 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
334 Replace OS MTRR's with SMI MTRR's.
336 @param CpuIndex Processor Index
344 SmmCpuFeaturesDisableSmrr ();
347 // Replace all MTRRs registers
349 MtrrSetAllMtrrs (&gSmiMtrrs
);
353 Check whether the task has been finished by all APs.
355 @param BlockMode Whether did it in block mode or non-block mode.
357 @retval TRUE Task has been finished by all APs.
358 @retval FALSE Task has not been finished by all APs.
362 WaitForAllAPsNotBusy (
368 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
370 // Ignore BSP and APs which not call in SMM.
372 if (!IsPresentAp(Index
)) {
377 AcquireSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
378 ReleaseSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
380 if (AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[Index
].Busy
)) {
381 ReleaseSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
392 Check whether it is an present AP.
394 @param CpuIndex The AP index which calls this function.
396 @retval TRUE It's a present AP.
397 @retval FALSE This is not an AP or it is not present.
405 return ((CpuIndex
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) &&
406 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
));
410 Clean up the status flags used during executing the procedure.
412 @param CpuIndex The AP index which calls this function.
420 PROCEDURE_TOKEN
*Token
;
422 Token
= mSmmMpSyncData
->CpuData
[CpuIndex
].Token
;
424 if (InterlockedDecrement (&Token
->RunningApCount
) == 0) {
425 ReleaseSpinLock (Token
->SpinLock
);
428 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= NULL
;
432 Free the tokens in the maintained list.
441 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
443 gSmmCpuPrivate
->FirstFreeToken
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
449 @param CpuIndex BSP processor Index
450 @param SyncMode SMM MP sync mode
456 IN SMM_CPU_SYNC_MODE SyncMode
462 BOOLEAN ClearTopLevelSmiResult
;
465 ASSERT (CpuIndex
== mSmmMpSyncData
->BspIndex
);
469 // Flag BSP's presence
471 *mSmmMpSyncData
->InsideSmm
= TRUE
;
474 // Initialize Debug Agent to start source level debug in BSP handler
476 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI
, NULL
, NULL
);
479 // Mark this processor's presence
481 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
484 // Clear platform top level SMI status bit before calling SMI handlers. If
485 // we cleared it after SMI handlers are run, we would miss the SMI that
486 // occurs after SMI handlers are done and before SMI status bit is cleared.
488 ClearTopLevelSmiResult
= ClearTopLevelSmiStatus();
489 ASSERT (ClearTopLevelSmiResult
== TRUE
);
492 // Set running processor index
494 gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
= CpuIndex
;
497 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
499 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
502 // Wait for APs to arrive
504 SmmWaitForApArrival();
507 // Lock the counter down and retrieve the number of APs
509 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
510 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
513 // Wait for all APs to get ready for programming MTRRs
515 WaitForAllAPs (ApCount
);
517 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
519 // Signal all APs it's time for backup MTRRs
524 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
525 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
526 // to a large enough value to avoid this situation.
527 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
528 // We do the backup first and then set MTRR to avoid race condition for threads
531 MtrrGetAllMtrrs(&Mtrrs
);
534 // Wait for all APs to complete their MTRR saving
536 WaitForAllAPs (ApCount
);
539 // Let all processors program SMM MTRRs together
544 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
545 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
546 // to a large enough value to avoid this situation.
548 ReplaceOSMtrrs (CpuIndex
);
551 // Wait for all APs to complete their MTRR programming
553 WaitForAllAPs (ApCount
);
558 // The BUSY lock is initialized to Acquired state
560 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
563 // Perform the pre tasks
568 // Invoke SMM Foundation EntryPoint with the processor information context.
570 gSmmCpuPrivate
->SmmCoreEntry (&gSmmCpuPrivate
->SmmCoreEntryContext
);
573 // Make sure all APs have completed their pending none-block tasks
575 WaitForAllAPsNotBusy (TRUE
);
578 // Perform the remaining tasks
580 PerformRemainingTasks ();
583 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
584 // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
585 // will run through freely.
587 if (SyncMode
!= SmmCpuSyncModeTradition
&& !SmmCpuFeaturesNeedConfigureMtrrs()) {
590 // Lock the counter down and retrieve the number of APs
592 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
593 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
595 // Make sure all APs have their Present flag set
599 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
600 if (*(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
604 if (PresentCount
> ApCount
) {
611 // Notify all APs to exit
613 *mSmmMpSyncData
->InsideSmm
= FALSE
;
617 // Wait for all APs to complete their pending tasks
619 WaitForAllAPs (ApCount
);
621 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
623 // Signal APs to restore MTRRs
630 SmmCpuFeaturesReenableSmrr ();
631 MtrrSetAllMtrrs(&Mtrrs
);
634 // Wait for all APs to complete MTRR programming
636 WaitForAllAPs (ApCount
);
640 // Stop source level debug in BSP handler, the code below will not be
643 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI
, NULL
, NULL
);
646 // Signal APs to Reset states/semaphore for this processor
651 // Perform pending operations for hot-plug
656 // Clear the Present flag of BSP
658 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
661 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
662 // WaitForAllAps does not depend on the Present flag.
664 WaitForAllAPs (ApCount
);
667 // Reset the tokens buffer.
672 // Reset BspIndex to -1, meaning BSP has not been elected.
674 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
675 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
679 // Allow APs to check in from this point on
681 *mSmmMpSyncData
->Counter
= 0;
682 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
688 @param CpuIndex AP processor Index.
689 @param ValidSmi Indicates that current SMI is a valid SMI or not.
690 @param SyncMode SMM MP sync mode.
697 IN SMM_CPU_SYNC_MODE SyncMode
703 EFI_STATUS ProcedureStatus
;
708 for (Timer
= StartSyncTimer ();
709 !IsSyncTimerTimeout (Timer
) &&
710 !(*mSmmMpSyncData
->InsideSmm
);
715 if (!(*mSmmMpSyncData
->InsideSmm
)) {
717 // BSP timeout in the first round
719 if (mSmmMpSyncData
->BspIndex
!= -1) {
721 // BSP Index is known
723 BspIndex
= mSmmMpSyncData
->BspIndex
;
724 ASSERT (CpuIndex
!= BspIndex
);
727 // Send SMI IPI to bring BSP in
729 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[BspIndex
].ProcessorId
);
732 // Now clock BSP for the 2nd time
734 for (Timer
= StartSyncTimer ();
735 !IsSyncTimerTimeout (Timer
) &&
736 !(*mSmmMpSyncData
->InsideSmm
);
741 if (!(*mSmmMpSyncData
->InsideSmm
)) {
743 // Give up since BSP is unable to enter SMM
744 // and signal the completion of this AP
745 WaitForSemaphore (mSmmMpSyncData
->Counter
);
750 // Don't know BSP index. Give up without sending IPI to BSP.
752 WaitForSemaphore (mSmmMpSyncData
->Counter
);
760 BspIndex
= mSmmMpSyncData
->BspIndex
;
761 ASSERT (CpuIndex
!= BspIndex
);
764 // Mark this processor's presence
766 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
768 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
770 // Notify BSP of arrival at this point
772 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
775 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
777 // Wait for the signal from BSP to backup MTRRs
779 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
784 MtrrGetAllMtrrs(&Mtrrs
);
787 // Signal BSP the completion of this AP
789 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
792 // Wait for BSP's signal to program MTRRs
794 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
797 // Replace OS MTRRs with SMI MTRRs
799 ReplaceOSMtrrs (CpuIndex
);
802 // Signal BSP the completion of this AP
804 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
809 // Wait for something to happen
811 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
814 // Check if BSP wants to exit SMM
816 if (!(*mSmmMpSyncData
->InsideSmm
)) {
821 // BUSY should be acquired by SmmStartupThisAp()
824 !AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)
828 // Invoke the scheduled procedure
830 ProcedureStatus
= (*mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
) (
831 (VOID
*)mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
833 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
834 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= ProcedureStatus
;
837 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Token
!= NULL
) {
838 ReleaseToken (CpuIndex
);
844 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
847 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
849 // Notify BSP the readiness of this AP to program MTRRs
851 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
854 // Wait for the signal from BSP to program MTRRs
856 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
861 SmmCpuFeaturesReenableSmrr ();
862 MtrrSetAllMtrrs(&Mtrrs
);
866 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
868 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
871 // Wait for the signal from BSP to Reset states/semaphore for this processor
873 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
876 // Reset states/semaphore for this processor
878 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
881 // Notify BSP the readiness of this AP to exit SMM
883 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
888 Create 4G PageTable in SMRAM.
890 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
891 @return PageTable Address
896 IN BOOLEAN Is32BitPageTable
904 UINTN High2MBoundary
;
914 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
916 // Add one more page for known good stack, then find the lower 2MB aligned address.
918 Low2MBoundary
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
) & ~(SIZE_2MB
-1);
920 // Add two more pages for known good stack and stack guard page,
921 // then find the lower 2MB aligned address.
923 High2MBoundary
= (mSmmStackArrayEnd
- mSmmStackSize
+ EFI_PAGE_SIZE
* 2) & ~(SIZE_2MB
-1);
924 PagesNeeded
= ((High2MBoundary
- Low2MBoundary
) / SIZE_2MB
) + 1;
927 // Allocate the page table
929 PageTable
= AllocatePageTableMemory (5 + PagesNeeded
);
930 ASSERT (PageTable
!= NULL
);
932 PageTable
= (VOID
*)((UINTN
)PageTable
);
933 Pte
= (UINT64
*)PageTable
;
936 // Zero out all page table entries first
938 ZeroMem (Pte
, EFI_PAGES_TO_SIZE (1));
941 // Set Page Directory Pointers
943 for (Index
= 0; Index
< 4; Index
++) {
944 Pte
[Index
] = ((UINTN
)PageTable
+ EFI_PAGE_SIZE
* (Index
+ 1)) | mAddressEncMask
|
945 (Is32BitPageTable
? IA32_PAE_PDPTE_ATTRIBUTE_BITS
: PAGE_ATTRIBUTE_BITS
);
947 Pte
+= EFI_PAGE_SIZE
/ sizeof (*Pte
);
950 // Fill in Page Directory Entries
952 for (Index
= 0; Index
< EFI_PAGE_SIZE
* 4 / sizeof (*Pte
); Index
++) {
953 Pte
[Index
] = (Index
<< 21) | mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
956 Pdpte
= (UINT64
*)PageTable
;
957 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
958 Pages
= (UINTN
)PageTable
+ EFI_PAGES_TO_SIZE (5);
959 GuardPage
= mSmmStackArrayBase
+ EFI_PAGE_SIZE
;
960 for (PageIndex
= Low2MBoundary
; PageIndex
<= High2MBoundary
; PageIndex
+= SIZE_2MB
) {
961 Pte
= (UINT64
*)(UINTN
)(Pdpte
[BitFieldRead32 ((UINT32
)PageIndex
, 30, 31)] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
962 Pte
[BitFieldRead32 ((UINT32
)PageIndex
, 21, 29)] = (UINT64
)Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
964 // Fill in Page Table Entries
966 Pte
= (UINT64
*)Pages
;
967 PageAddress
= PageIndex
;
968 for (Index
= 0; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
969 if (PageAddress
== GuardPage
) {
971 // Mark the guard page as non-present
973 Pte
[Index
] = PageAddress
| mAddressEncMask
;
974 GuardPage
+= mSmmStackSize
;
975 if (GuardPage
> mSmmStackArrayEnd
) {
979 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
981 PageAddress
+= EFI_PAGE_SIZE
;
983 Pages
+= EFI_PAGE_SIZE
;
987 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) {
988 Pte
= (UINT64
*)(UINTN
)(Pdpte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
989 if ((Pte
[0] & IA32_PG_PS
) == 0) {
990 // 4K-page entries are already mapped. Just hide the first one anyway.
991 Pte
= (UINT64
*)(UINTN
)(Pte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
992 Pte
[0] &= ~(UINT64
)IA32_PG_P
; // Hide page 0
994 // Create 4K-page entries
995 Pages
= (UINTN
)AllocatePageTableMemory (1);
998 Pte
[0] = (UINT64
)(Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
1000 Pte
= (UINT64
*)Pages
;
1002 Pte
[0] = PageAddress
| mAddressEncMask
; // Hide page 0 but present left
1003 for (Index
= 1; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
1004 PageAddress
+= EFI_PAGE_SIZE
;
1005 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1010 return (UINT32
)(UINTN
)PageTable
;
1014 Checks whether the input token is the current used token.
1016 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1019 @retval TRUE The input token is the current used token.
1020 @retval FALSE The input token is not the current used token.
1028 PROCEDURE_TOKEN
*ProcToken
;
1030 if (Token
== NULL
) {
1034 Link
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
1036 // Only search used tokens.
1038 while (Link
!= gSmmCpuPrivate
->FirstFreeToken
) {
1039 ProcToken
= PROCEDURE_TOKEN_FROM_LINK (Link
);
1041 if (ProcToken
->SpinLock
== Token
) {
1045 Link
= GetNextNode (&gSmmCpuPrivate
->TokenList
, Link
);
1052 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1054 @return First token of the token buffer.
1057 AllocateTokenBuffer (
1062 UINT32 TokenCountPerChunk
;
1064 SPIN_LOCK
*SpinLock
;
1065 UINT8
*SpinLockBuffer
;
1066 PROCEDURE_TOKEN
*ProcTokens
;
1068 SpinLockSize
= GetSpinLockProperties ();
1070 TokenCountPerChunk
= FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk
);
1071 ASSERT (TokenCountPerChunk
!= 0);
1072 if (TokenCountPerChunk
== 0) {
1073 DEBUG ((DEBUG_ERROR
, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1076 DEBUG ((DEBUG_INFO
, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize
, TokenCountPerChunk
));
1079 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1081 SpinLockBuffer
= AllocatePool (SpinLockSize
* TokenCountPerChunk
);
1082 ASSERT (SpinLockBuffer
!= NULL
);
1084 ProcTokens
= AllocatePool (sizeof (PROCEDURE_TOKEN
) * TokenCountPerChunk
);
1085 ASSERT (ProcTokens
!= NULL
);
1087 for (Index
= 0; Index
< TokenCountPerChunk
; Index
++) {
1088 SpinLock
= (SPIN_LOCK
*)(SpinLockBuffer
+ SpinLockSize
* Index
);
1089 InitializeSpinLock (SpinLock
);
1091 ProcTokens
[Index
].Signature
= PROCEDURE_TOKEN_SIGNATURE
;
1092 ProcTokens
[Index
].SpinLock
= SpinLock
;
1093 ProcTokens
[Index
].RunningApCount
= 0;
1095 InsertTailList (&gSmmCpuPrivate
->TokenList
, &ProcTokens
[Index
].Link
);
1098 return &ProcTokens
[0].Link
;
1104 If no free token, allocate new tokens then return the free one.
1106 @param RunningApsCount The Running Aps count for this token.
1108 @retval return the first free PROCEDURE_TOKEN.
1113 IN UINT32 RunningApsCount
1116 PROCEDURE_TOKEN
*NewToken
;
1119 // If FirstFreeToken meets the end of token list, enlarge the token list.
1120 // Set FirstFreeToken to the first free token.
1122 if (gSmmCpuPrivate
->FirstFreeToken
== &gSmmCpuPrivate
->TokenList
) {
1123 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
1125 NewToken
= PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate
->FirstFreeToken
);
1126 gSmmCpuPrivate
->FirstFreeToken
= GetNextNode (&gSmmCpuPrivate
->TokenList
, gSmmCpuPrivate
->FirstFreeToken
);
1128 NewToken
->RunningApCount
= RunningApsCount
;
1129 AcquireSpinLock (NewToken
->SpinLock
);
1135 Checks status of specified AP.
1137 This function checks whether the specified AP has finished the task assigned
1138 by StartupThisAP(), and whether timeout expires.
1140 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1143 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1144 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1151 if (AcquireSpinLockOrFail (Token
)) {
1152 ReleaseSpinLock (Token
);
1156 return EFI_NOT_READY
;
1160 Schedule a procedure to run on the specified CPU.
1162 @param[in] Procedure The address of the procedure to run
1163 @param[in] CpuIndex Target CPU Index
1164 @param[in,out] ProcArguments The parameter to pass to the procedure
1165 @param[in] Token This is an optional parameter that allows the caller to execute the
1166 procedure in a blocking or non-blocking fashion. If it is NULL the
1167 call is blocking, and the call will not return until the AP has
1168 completed the procedure. If the token is not NULL, the call will
1169 return immediately. The caller can check whether the procedure has
1170 completed with CheckOnProcedure or WaitForProcedure.
1171 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1172 execution of Procedure, either for blocking or non-blocking mode.
1173 Zero means infinity. If the timeout expires before all APs return
1174 from Procedure, then Procedure on the failed APs is terminated. If
1175 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1176 If the timeout expires in non-blocking mode, the timeout determined
1177 can be through CheckOnProcedure or WaitForProcedure.
1178 Note that timeout support is optional. Whether an implementation
1179 supports this feature can be determined via the Attributes data
1181 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1182 by Procedure when it completes execution on the target AP, or with
1183 EFI_TIMEOUT if the Procedure fails to complete within the optional
1184 timeout. The implementation will update this variable with
1185 EFI_NOT_READY prior to starting Procedure on the target AP.
1187 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1188 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1189 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1190 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1191 @retval EFI_SUCCESS The procedure has been successfully scheduled
1195 InternalSmmStartupThisAp (
1196 IN EFI_AP_PROCEDURE2 Procedure
,
1198 IN OUT VOID
*ProcArguments OPTIONAL
,
1199 IN MM_COMPLETION
*Token
,
1200 IN UINTN TimeoutInMicroseconds
,
1201 IN OUT EFI_STATUS
*CpuStatus
1204 PROCEDURE_TOKEN
*ProcToken
;
1206 if (CpuIndex
>= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
) {
1207 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex
, gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
));
1208 return EFI_INVALID_PARAMETER
;
1210 if (CpuIndex
== gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1211 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex
));
1212 return EFI_INVALID_PARAMETER
;
1214 if (gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
== INVALID_APIC_ID
) {
1215 return EFI_INVALID_PARAMETER
;
1217 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
1218 if (mSmmMpSyncData
->EffectiveSyncMode
== SmmCpuSyncModeTradition
) {
1219 DEBUG((DEBUG_ERROR
, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex
));
1221 return EFI_INVALID_PARAMETER
;
1223 if (gSmmCpuPrivate
->Operation
[CpuIndex
] == SmmCpuRemove
) {
1224 if (!FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1225 DEBUG((DEBUG_ERROR
, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex
));
1227 return EFI_INVALID_PARAMETER
;
1229 if ((TimeoutInMicroseconds
!= 0) && ((mSmmMp
.Attributes
& EFI_MM_MP_TIMEOUT_SUPPORTED
) == 0)) {
1230 return EFI_INVALID_PARAMETER
;
1232 if (Procedure
== NULL
) {
1233 return EFI_INVALID_PARAMETER
;
1236 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1238 mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
= Procedure
;
1239 mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
= ProcArguments
;
1240 if (Token
!= NULL
) {
1241 ProcToken
= GetFreeToken (1);
1242 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= ProcToken
;
1243 *Token
= (MM_COMPLETION
)ProcToken
->SpinLock
;
1245 mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= CpuStatus
;
1246 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
1247 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= EFI_NOT_READY
;
1250 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
1252 if (Token
== NULL
) {
1253 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1254 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1261 Worker function to execute a caller provided function on all enabled APs.
1263 @param[in] Procedure A pointer to the function to be run on
1264 enabled APs of the system.
1265 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1266 APs to return from Procedure, either for
1267 blocking or non-blocking mode.
1268 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1270 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1271 procedure in a blocking or non-blocking fashion. If it is NULL the
1272 call is blocking, and the call will not return until the AP has
1273 completed the procedure. If the token is not NULL, the call will
1274 return immediately. The caller can check whether the procedure has
1275 completed with CheckOnProcedure or WaitForProcedure.
1276 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1277 by Procedure when it completes execution on the target AP, or with
1278 EFI_TIMEOUT if the Procedure fails to complete within the optional
1279 timeout. The implementation will update this variable with
1280 EFI_NOT_READY prior to starting Procedure on the target AP.
1283 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1284 the timeout expired.
1285 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1287 @retval others Failed to Startup all APs.
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN               Index;
  UINTN               CpuCount;    // Number of present APs that will run Procedure
  PROCEDURE_TOKEN     *ProcToken;

  //
  // A non-zero timeout is only valid when the MP capability set advertises
  // timeout support.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // First pass: count the present APs and verify each one is schedulable:
  // not pending hot-removal and not currently running another procedure.
  // The Busy lock is only probed here (acquire-then-release); it is taken
  // for real in the second pass below.
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount ++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // In non-blocking mode (Token != NULL), hand the caller a completion token.
  // NOTE(review): the token's spin lock is presumably released when the last
  // AP finishes (RunningApCount reaches zero) — confirm in the token helpers.
  //
  ProcToken = NULL;
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Second pass: program every present AP's per-CPU mailbox; handle the
  // excluded (not-present) entries per the PI spec.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token   = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status    = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          //
          // Caller-visible status starts as EFI_NOT_READY until the AP
          // actually runs Procedure.
          //
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  //
  // Wake every present AP; each picks Procedure up from its mailbox.
  //
  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1393 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1394 If the function is defined with a type that is not compatible with
1395 the type (of the expression) pointed to by the expression that
1396 denotes the called function, the behavior is undefined.
1398 So add below wrapper function to convert between EFI_AP_PROCEDURE
1399 and EFI_AP_PROCEDURE2.
1401 Wrapper for Procedures.
1403 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1412 PROCEDURE_WRAPPER
*Wrapper
;
1415 Wrapper
->Procedure (Wrapper
->ProcedureArgument
);
1421 Schedule a procedure to run on the specified CPU in blocking mode.
1423 @param[in] Procedure The address of the procedure to run
1424 @param[in] CpuIndex Target CPU Index
1425 @param[in, out] ProcArguments The parameter to pass to the procedure
1427 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1428 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1429 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1430 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1431 @retval EFI_SUCCESS The procedure has been successfully scheduled
1436 SmmBlockingStartupThisAp (
1437 IN EFI_AP_PROCEDURE Procedure
,
1439 IN OUT VOID
*ProcArguments OPTIONAL
1442 PROCEDURE_WRAPPER Wrapper
;
1444 Wrapper
.Procedure
= Procedure
;
1445 Wrapper
.ProcedureArgument
= ProcArguments
;
1448 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1450 return InternalSmmStartupThisAp (ProcedureWrapper
, CpuIndex
, &Wrapper
, NULL
, 0, NULL
);
1454 Schedule a procedure to run on the specified CPU.
1456 @param Procedure The address of the procedure to run
1457 @param CpuIndex Target CPU Index
1458 @param ProcArguments The parameter to pass to the procedure
1460 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1461 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1462 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1463 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1464 @retval EFI_SUCCESS The procedure has been successfully scheduled
1470 IN EFI_AP_PROCEDURE Procedure
,
1472 IN OUT VOID
*ProcArguments OPTIONAL
1475 MM_COMPLETION Token
;
1477 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].Procedure
= Procedure
;
1478 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].ProcedureArgument
= ProcArguments
;
1481 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1483 return InternalSmmStartupThisAp (
1486 &gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
],
1487 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp
) ? NULL
: &Token
,
1494 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1495 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode again.
1497 NOTE: It might not be appreciated in runtime since it might
1498 conflict with OS debugging facilities. Turn them off in RELEASE.
1500 @param CpuIndex CPU Index
1509 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1511 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1512 ASSERT(CpuIndex
< mMaxNumberOfCpus
);
1513 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1514 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1515 AsmWriteDr6 (CpuSaveState
->x86
._DR6
);
1516 AsmWriteDr7 (CpuSaveState
->x86
._DR7
);
1518 AsmWriteDr6 ((UINTN
)CpuSaveState
->x64
._DR6
);
1519 AsmWriteDr7 ((UINTN
)CpuSaveState
->x64
._DR7
);
1525 This function restores DR6 & DR7 to SMM save state.
1527 NOTE: It might not be appreciated in runtime since it might
1528 conflict with OS debugging facilities. Turn them off in RELEASE.
1530 @param CpuIndex CPU Index
1539 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1541 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1542 ASSERT(CpuIndex
< mMaxNumberOfCpus
);
1543 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1544 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1545 CpuSaveState
->x86
._DR7
= (UINT32
)AsmReadDr7 ();
1546 CpuSaveState
->x86
._DR6
= (UINT32
)AsmReadDr6 ();
1548 CpuSaveState
->x64
._DR7
= AsmReadDr7 ();
1549 CpuSaveState
->x64
._DR6
= AsmReadDr6 ();
1555 C function for SMI entry, each processor comes here upon SMI trigger.
1557 @param CpuIndex CPU Index
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor. ReleaseSemaphore() returning 0 means
    // the Counter had been locked by the BSP (see LockdownSemaphore usage
    // elsewhere in this driver), i.e. check-in is closed.
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method:
            // first CPU to swap BspIndex from -1 wins.
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    //
    // This CPU must have drained its Run semaphore before leaving.
    //
    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1734 Allocate buffer for SpinLock and Wrapper function buffer.
1738 InitializeDataForMmMp (
1742 gSmmCpuPrivate
->ApWrapperFunc
= AllocatePool (sizeof (PROCEDURE_WRAPPER
) * gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
);
1743 ASSERT (gSmmCpuPrivate
->ApWrapperFunc
!= NULL
);
1745 InitializeListHead (&gSmmCpuPrivate
->TokenList
);
1747 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
1751 Allocate buffer for all semaphores and spin locks.
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  //
  // Each semaphore/lock gets its own SemaphoreSize-aligned slot
  // (SemaphoreSize comes from GetSpinLockProperties(); presumably the
  // cache-line-friendly spin lock granularity — confirm in BaseLib).
  // Layout: all global semaphores first, then the per-CPU arrays, each
  // array holding ProcessorCount slots.
  //
  SemaphoreSize   = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  //
  // Carve the global semaphores out of the block, one slot each.
  //
  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  //
  // Carve the per-CPU semaphore arrays; each array is ProcessorCount slots
  // and element i lives at base + i * SemaphoreSize (see InitializeMpSyncData).
  //
  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  //
  // Publish convenience aliases used by the page-fault handler and the
  // code-access-check configuration path.
  //
  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
1807 Initialize un-cacheable data.
VOID
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // The shared synchronization flags live in the dedicated semaphore block
    // allocated by InitializeSmmCpuSemaphores(), not inside this structure.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    //
    // Point each CPU's Busy/Run/Present at its slot in the per-CPU semaphore
    // arrays (element i at base + i * mSemaphoreSize) and reset them.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
1858 Initialize global data for MP synchronization.
1860 @param Stacks Base address of SMI stack buffer for all processors.
1861 @param StackSize Stack size for each processor in SMM.
1862 @param ShadowStackSize Shadow Stack size for each processor in SMM.
VOID
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize,
  IN UINTN       ShadowStackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  // CPUID leaf 0x80000008 EAX[7:0] reports the physical address width.
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU; each one gets its own stack slice
  // (regular + shadow stack) and its own GDT/TSS copy.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      (UINTN)gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);
}
1942 Register the SMM Foundation entry point.
1944 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1945 @param SmmEntryPoint SMM Foundation EntryPoint
1947 @retval EFI_SUCCESS The SMM foundation entry point was registered successfully
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  // Note: This (the protocol instance pointer) is intentionally unused.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}
1966 Register the SMM Foundation entry point.
1968 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
1969 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
1970 with the related definitions of
1971 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
1972 If caller may pass a value of NULL to deregister any existing
1974 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
1975 run by the AP. It is an optional common mailbox between APs and
1976 the caller to share information
1978 @retval EFI_SUCCESS The Procedure has been set successfully.
1979 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
1983 RegisterStartupProcedure (
1984 IN EFI_AP_PROCEDURE Procedure
,
1985 IN OUT VOID
*ProcedureArguments OPTIONAL
1988 if (Procedure
== NULL
&& ProcedureArguments
!= NULL
) {
1989 return EFI_INVALID_PARAMETER
;
1991 if (mSmmMpSyncData
== NULL
) {
1992 return EFI_NOT_READY
;
1995 mSmmMpSyncData
->StartupProcedure
= Procedure
;
1996 mSmmMpSyncData
->StartupProcArgs
= ProcedureArguments
;