2 SMM MP service implementation
4 Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
16 MTRR_SETTINGS gSmiMtrrs
;
18 SMM_DISPATCHER_MP_SYNC_DATA
*mSmmMpSyncData
= NULL
;
19 UINTN mSmmMpSyncDataSize
;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores
;
22 SPIN_LOCK
*mPFLock
= NULL
;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode
;
24 BOOLEAN mMachineCheckSupported
= FALSE
;
25 MM_COMPLETION mSmmStartupThisApToken
;
27 extern UINTN mSmmShadowStackSize
;
30 Performs an atomic compare exchange operation to get semaphore.
31 The compare exchange operation must be performed using
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
41 IN OUT
volatile UINT32
*Sem
49 (InterlockedCompareExchange32 (
65 Performs an atomic compare exchange operation to release semaphore.
66 The compare exchange operation must be performed using
69 @param Sem IN: 32-bit unsigned integer
70 OUT: original integer + 1
71 @return Original integer + 1
76 IN OUT
volatile UINT32
*Sem
83 } while (Value
+ 1 != 0 &&
84 InterlockedCompareExchange32 (
94 Performs an atomic compare exchange operation to lock semaphore.
95 The compare exchange operation must be performed using
98 @param Sem IN: 32-bit unsigned integer
100 @return Original integer
105 IN OUT
volatile UINT32
*Sem
112 } while (InterlockedCompareExchange32 (
122 Wait all APs to performs an atomic compare exchange operation to release semaphore.
124 @param NumberOfAPs AP number
134 BspIndex
= mSmmMpSyncData
->BspIndex
;
135 while (NumberOfAPs
-- > 0) {
136 WaitForSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
141 Performs an atomic compare exchange operation to release semaphore
152 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
153 if (IsPresentAp (Index
)) {
154 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[Index
].Run
);
160 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
162 @param Exceptions CPU Arrival exception flags.
164 @retval TRUE if all CPUs the have checked in.
165 @retval FALSE if at least one Normal AP hasn't checked in.
169 AllCpusInSmmWithExceptions (
170 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
174 SMM_CPU_DATA_BLOCK
*CpuData
;
175 EFI_PROCESSOR_INFORMATION
*ProcessorInfo
;
177 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
179 if (*mSmmMpSyncData
->Counter
== mNumberOfCpus
) {
183 CpuData
= mSmmMpSyncData
->CpuData
;
184 ProcessorInfo
= gSmmCpuPrivate
->ProcessorInfo
;
185 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
186 if (!(*(CpuData
[Index
].Present
)) && (ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
)) {
187 if (((Exceptions
& ARRIVAL_EXCEPTION_DELAYED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmDelayed
) != 0)) {
191 if (((Exceptions
& ARRIVAL_EXCEPTION_BLOCKED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmBlocked
) != 0)) {
195 if (((Exceptions
& ARRIVAL_EXCEPTION_SMI_DISABLED
) != 0) && (SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmEnable
) != 0)) {
207 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
209 @retval TRUE Os enable lmce.
210 @retval FALSE Os not enable lmce.
218 MSR_IA32_MCG_CAP_REGISTER McgCap
;
219 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl
;
220 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl
;
222 McgCap
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_CAP
);
223 if (McgCap
.Bits
.MCG_LMCE_P
== 0) {
227 FeatureCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL
);
228 if (FeatureCtrl
.Bits
.LmceOn
== 0) {
232 McgExtCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL
);
233 return (BOOLEAN
)(McgExtCtrl
.Bits
.LMCE_EN
== 1);
237 Return if Local machine check exception signaled.
239 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
240 delivered to only the logical processor.
242 @retval TRUE LMCE was signaled.
243 @retval FALSE LMCE was not signaled.
251 MSR_IA32_MCG_STATUS_REGISTER McgStatus
;
253 McgStatus
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_STATUS
);
254 return (BOOLEAN
)(McgStatus
.Bits
.LMCE_S
== 1);
258 Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
259 entering SMM, except SMI disabled APs.
263 SmmWaitForApArrival (
272 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
276 if (mMachineCheckSupported
) {
277 LmceEn
= IsLmceOsEnabled ();
278 LmceSignal
= IsLmceSignaled ();
282 // Platform implementor should choose a timeout value appropriately:
283 // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
284 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
285 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
286 // and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
287 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
288 // SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
289 // - The timeout value must be longer than longest possible IO operation in the system
293 // Sync with APs 1st timeout
295 for (Timer
= StartSyncTimer ();
296 !IsSyncTimerTimeout (Timer
) && !(LmceEn
&& LmceSignal
);
299 mSmmMpSyncData
->AllApArrivedWithException
= AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
300 if (mSmmMpSyncData
->AllApArrivedWithException
) {
308 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
310 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
311 // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
312 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
313 // work while SMI handling is on-going.
314 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
315 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
316 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
317 // mode work while SMI handling is on-going.
318 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
319 // - In traditional flow, SMI disabling is discouraged.
320 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
321 // In both cases, adding SMI-disabling checking code increases overhead.
323 if (*mSmmMpSyncData
->Counter
< mNumberOfCpus
) {
325 // Send SMI IPIs to bring outside processors in
327 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
328 if (!(*(mSmmMpSyncData
->CpuData
[Index
].Present
)) && (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
)) {
329 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
334 // Sync with APs 2nd timeout.
336 for (Timer
= StartSyncTimer ();
337 !IsSyncTimerTimeout (Timer
);
340 mSmmMpSyncData
->AllApArrivedWithException
= AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
341 if (mSmmMpSyncData
->AllApArrivedWithException
) {
353 Replace OS MTRR's with SMI MTRR's.
355 @param CpuIndex Processor Index
363 SmmCpuFeaturesDisableSmrr ();
366 // Replace all MTRRs registers
368 MtrrSetAllMtrrs (&gSmiMtrrs
);
372 Wheck whether task has been finished by all APs.
374 @param BlockMode Whether did it in block mode or non-block mode.
376 @retval TRUE Task has been finished by all APs.
377 @retval FALSE Task not has been finished by all APs.
381 WaitForAllAPsNotBusy (
387 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
389 // Ignore BSP and APs which not call in SMM.
391 if (!IsPresentAp (Index
)) {
396 AcquireSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
397 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
399 if (AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[Index
].Busy
)) {
400 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
411 Check whether it is an present AP.
413 @param CpuIndex The AP index which calls this function.
415 @retval TRUE It's a present AP.
416 @retval TRUE This is not an AP or it is not present.
424 return ((CpuIndex
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) &&
425 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
));
429 Clean up the status flags used during executing the procedure.
431 @param CpuIndex The AP index which calls this function.
439 PROCEDURE_TOKEN
*Token
;
441 Token
= mSmmMpSyncData
->CpuData
[CpuIndex
].Token
;
443 if (InterlockedDecrement (&Token
->RunningApCount
) == 0) {
444 ReleaseSpinLock (Token
->SpinLock
);
447 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= NULL
;
451 Free the tokens in the maintained list.
460 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
462 gSmmCpuPrivate
->FirstFreeToken
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
468 @param CpuIndex BSP processor Index
469 @param SyncMode SMM MP sync mode
475 IN SMM_CPU_SYNC_MODE SyncMode
481 BOOLEAN ClearTopLevelSmiResult
;
484 ASSERT (CpuIndex
== mSmmMpSyncData
->BspIndex
);
488 // Flag BSP's presence
490 *mSmmMpSyncData
->InsideSmm
= TRUE
;
493 // Initialize Debug Agent to start source level debug in BSP handler
495 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI
, NULL
, NULL
);
498 // Mark this processor's presence
500 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
503 // Clear platform top level SMI status bit before calling SMI handlers. If
504 // we cleared it after SMI handlers are run, we would miss the SMI that
505 // occurs after SMI handlers are done and before SMI status bit is cleared.
507 ClearTopLevelSmiResult
= ClearTopLevelSmiStatus ();
508 ASSERT (ClearTopLevelSmiResult
== TRUE
);
511 // Set running processor index
513 gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
= CpuIndex
;
516 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
518 if ((SyncMode
== SmmCpuSyncModeTradition
) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
520 // Wait for APs to arrive
522 SmmWaitForApArrival ();
525 // Lock the counter down and retrieve the number of APs
527 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
528 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
531 // Wait for all APs to get ready for programming MTRRs
533 WaitForAllAPs (ApCount
);
535 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
537 // Signal all APs it's time for backup MTRRs
542 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
543 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
544 // to a large enough value to avoid this situation.
545 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
546 // We do the backup first and then set MTRR to avoid race condition for threads
549 MtrrGetAllMtrrs (&Mtrrs
);
552 // Wait for all APs to complete their MTRR saving
554 WaitForAllAPs (ApCount
);
557 // Let all processors program SMM MTRRs together
562 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
563 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
564 // to a large enough value to avoid this situation.
566 ReplaceOSMtrrs (CpuIndex
);
569 // Wait for all APs to complete their MTRR programming
571 WaitForAllAPs (ApCount
);
576 // The BUSY lock is initialized to Acquired state
578 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
581 // Perform the pre tasks
586 // Invoke SMM Foundation EntryPoint with the processor information context.
588 gSmmCpuPrivate
->SmmCoreEntry (&gSmmCpuPrivate
->SmmCoreEntryContext
);
591 // Make sure all APs have completed their pending none-block tasks
593 WaitForAllAPsNotBusy (TRUE
);
596 // Perform the remaining tasks
598 PerformRemainingTasks ();
601 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
602 // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
603 // will run through freely.
605 if ((SyncMode
!= SmmCpuSyncModeTradition
) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
607 // Lock the counter down and retrieve the number of APs
609 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
610 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
612 // Make sure all APs have their Present flag set
616 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
617 if (*(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
622 if (PresentCount
> ApCount
) {
629 // Notify all APs to exit
631 *mSmmMpSyncData
->InsideSmm
= FALSE
;
635 // Wait for all APs to complete their pending tasks
637 WaitForAllAPs (ApCount
);
639 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
641 // Signal APs to restore MTRRs
648 SmmCpuFeaturesReenableSmrr ();
649 MtrrSetAllMtrrs (&Mtrrs
);
652 // Wait for all APs to complete MTRR programming
654 WaitForAllAPs (ApCount
);
658 // Stop source level debug in BSP handler, the code below will not be
661 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI
, NULL
, NULL
);
664 // Signal APs to Reset states/semaphore for this processor
669 // Perform pending operations for hot-plug
674 // Clear the Present flag of BSP
676 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
679 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
680 // WaitForAllAps does not depend on the Present flag.
682 WaitForAllAPs (ApCount
);
685 // Reset the tokens buffer.
690 // Reset BspIndex to -1, meaning BSP has not been elected.
692 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
693 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
697 // Allow APs to check in from this point on
699 *mSmmMpSyncData
->Counter
= 0;
700 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
706 @param CpuIndex AP processor Index.
707 @param ValidSmi Indicates that current SMI is a valid SMI or not.
708 @param SyncMode SMM MP sync mode.
715 IN SMM_CPU_SYNC_MODE SyncMode
721 EFI_STATUS ProcedureStatus
;
726 for (Timer
= StartSyncTimer ();
727 !IsSyncTimerTimeout (Timer
) &&
728 !(*mSmmMpSyncData
->InsideSmm
);
734 if (!(*mSmmMpSyncData
->InsideSmm
)) {
736 // BSP timeout in the first round
738 if (mSmmMpSyncData
->BspIndex
!= -1) {
740 // BSP Index is known
742 BspIndex
= mSmmMpSyncData
->BspIndex
;
743 ASSERT (CpuIndex
!= BspIndex
);
746 // Send SMI IPI to bring BSP in
748 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[BspIndex
].ProcessorId
);
751 // Now clock BSP for the 2nd time
753 for (Timer
= StartSyncTimer ();
754 !IsSyncTimerTimeout (Timer
) &&
755 !(*mSmmMpSyncData
->InsideSmm
);
761 if (!(*mSmmMpSyncData
->InsideSmm
)) {
763 // Give up since BSP is unable to enter SMM
764 // and signal the completion of this AP
765 WaitForSemaphore (mSmmMpSyncData
->Counter
);
770 // Don't know BSP index. Give up without sending IPI to BSP.
772 WaitForSemaphore (mSmmMpSyncData
->Counter
);
780 BspIndex
= mSmmMpSyncData
->BspIndex
;
781 ASSERT (CpuIndex
!= BspIndex
);
784 // Mark this processor's presence
786 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
788 if ((SyncMode
== SmmCpuSyncModeTradition
) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
790 // Notify BSP of arrival at this point
792 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
795 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
797 // Wait for the signal from BSP to backup MTRRs
799 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
804 MtrrGetAllMtrrs (&Mtrrs
);
807 // Signal BSP the completion of this AP
809 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
812 // Wait for BSP's signal to program MTRRs
814 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
817 // Replace OS MTRRs with SMI MTRRs
819 ReplaceOSMtrrs (CpuIndex
);
822 // Signal BSP the completion of this AP
824 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
829 // Wait for something to happen
831 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
834 // Check if BSP wants to exit SMM
836 if (!(*mSmmMpSyncData
->InsideSmm
)) {
841 // BUSY should be acquired by SmmStartupThisAp()
844 !AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)
848 // Invoke the scheduled procedure
850 ProcedureStatus
= (*mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
)(
851 (VOID
*)mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
853 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
854 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= ProcedureStatus
;
857 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Token
!= NULL
) {
858 ReleaseToken (CpuIndex
);
864 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
867 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
869 // Notify BSP the readiness of this AP to program MTRRs
871 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
874 // Wait for the signal from BSP to program MTRRs
876 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
881 SmmCpuFeaturesReenableSmrr ();
882 MtrrSetAllMtrrs (&Mtrrs
);
886 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
888 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
891 // Wait for the signal from BSP to Reset states/semaphore for this processor
893 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
896 // Reset states/semaphore for this processor
898 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
901 // Notify BSP the readiness of this AP to exit SMM
903 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
907 Create 4G PageTable in SMRAM.
909 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
910 @return PageTable Address
915 IN BOOLEAN Is32BitPageTable
923 UINTN High2MBoundary
;
933 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
935 // Add one more page for known good stack, then find the lower 2MB aligned address.
937 Low2MBoundary
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
) & ~(SIZE_2MB
-1);
939 // Add two more pages for known good stack and stack guard page,
940 // then find the lower 2MB aligned address.
942 High2MBoundary
= (mSmmStackArrayEnd
- mSmmStackSize
- mSmmShadowStackSize
+ EFI_PAGE_SIZE
* 2) & ~(SIZE_2MB
-1);
943 PagesNeeded
= ((High2MBoundary
- Low2MBoundary
) / SIZE_2MB
) + 1;
947 // Allocate the page table
949 PageTable
= AllocatePageTableMemory (5 + PagesNeeded
);
950 ASSERT (PageTable
!= NULL
);
952 PageTable
= (VOID
*)((UINTN
)PageTable
);
953 Pte
= (UINT64
*)PageTable
;
956 // Zero out all page table entries first
958 ZeroMem (Pte
, EFI_PAGES_TO_SIZE (1));
961 // Set Page Directory Pointers
963 for (Index
= 0; Index
< 4; Index
++) {
964 Pte
[Index
] = ((UINTN
)PageTable
+ EFI_PAGE_SIZE
* (Index
+ 1)) | mAddressEncMask
|
965 (Is32BitPageTable
? IA32_PAE_PDPTE_ATTRIBUTE_BITS
: PAGE_ATTRIBUTE_BITS
);
968 Pte
+= EFI_PAGE_SIZE
/ sizeof (*Pte
);
971 // Fill in Page Directory Entries
973 for (Index
= 0; Index
< EFI_PAGE_SIZE
* 4 / sizeof (*Pte
); Index
++) {
974 Pte
[Index
] = (Index
<< 21) | mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
977 Pdpte
= (UINT64
*)PageTable
;
978 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
979 Pages
= (UINTN
)PageTable
+ EFI_PAGES_TO_SIZE (5);
980 GuardPage
= mSmmStackArrayBase
+ EFI_PAGE_SIZE
;
981 for (PageIndex
= Low2MBoundary
; PageIndex
<= High2MBoundary
; PageIndex
+= SIZE_2MB
) {
982 Pte
= (UINT64
*)(UINTN
)(Pdpte
[BitFieldRead32 ((UINT32
)PageIndex
, 30, 31)] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
983 Pte
[BitFieldRead32 ((UINT32
)PageIndex
, 21, 29)] = (UINT64
)Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
985 // Fill in Page Table Entries
987 Pte
= (UINT64
*)Pages
;
988 PageAddress
= PageIndex
;
989 for (Index
= 0; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
990 if (PageAddress
== GuardPage
) {
992 // Mark the guard page as non-present
994 Pte
[Index
] = PageAddress
| mAddressEncMask
;
995 GuardPage
+= (mSmmStackSize
+ mSmmShadowStackSize
);
996 if (GuardPage
> mSmmStackArrayEnd
) {
1000 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1003 PageAddress
+= EFI_PAGE_SIZE
;
1006 Pages
+= EFI_PAGE_SIZE
;
1010 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) {
1011 Pte
= (UINT64
*)(UINTN
)(Pdpte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
1012 if ((Pte
[0] & IA32_PG_PS
) == 0) {
1013 // 4K-page entries are already mapped. Just hide the first one anyway.
1014 Pte
= (UINT64
*)(UINTN
)(Pte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
1015 Pte
[0] &= ~(UINT64
)IA32_PG_P
; // Hide page 0
1017 // Create 4K-page entries
1018 Pages
= (UINTN
)AllocatePageTableMemory (1);
1019 ASSERT (Pages
!= 0);
1021 Pte
[0] = (UINT64
)(Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
1023 Pte
= (UINT64
*)Pages
;
1025 Pte
[0] = PageAddress
| mAddressEncMask
; // Hide page 0 but present left
1026 for (Index
= 1; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
1027 PageAddress
+= EFI_PAGE_SIZE
;
1028 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1033 return (UINT32
)(UINTN
)PageTable
;
1037 Checks whether the input token is the current used token.
1039 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1042 @retval TRUE The input token is the current used token.
1043 @retval FALSE The input token is not the current used token.
1051 PROCEDURE_TOKEN
*ProcToken
;
1053 if (Token
== NULL
) {
1057 Link
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
1059 // Only search used tokens.
1061 while (Link
!= gSmmCpuPrivate
->FirstFreeToken
) {
1062 ProcToken
= PROCEDURE_TOKEN_FROM_LINK (Link
);
1064 if (ProcToken
->SpinLock
== Token
) {
1068 Link
= GetNextNode (&gSmmCpuPrivate
->TokenList
, Link
);
1075 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1077 @return First token of the token buffer.
1080 AllocateTokenBuffer (
1085 UINT32 TokenCountPerChunk
;
1087 SPIN_LOCK
*SpinLock
;
1088 UINT8
*SpinLockBuffer
;
1089 PROCEDURE_TOKEN
*ProcTokens
;
1091 SpinLockSize
= GetSpinLockProperties ();
1093 TokenCountPerChunk
= FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk
);
1094 ASSERT (TokenCountPerChunk
!= 0);
1095 if (TokenCountPerChunk
== 0) {
1096 DEBUG ((DEBUG_ERROR
, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1100 DEBUG ((DEBUG_INFO
, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize
, TokenCountPerChunk
));
1103 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1105 SpinLockBuffer
= AllocatePool (SpinLockSize
* TokenCountPerChunk
);
1106 ASSERT (SpinLockBuffer
!= NULL
);
1108 ProcTokens
= AllocatePool (sizeof (PROCEDURE_TOKEN
) * TokenCountPerChunk
);
1109 ASSERT (ProcTokens
!= NULL
);
1111 for (Index
= 0; Index
< TokenCountPerChunk
; Index
++) {
1112 SpinLock
= (SPIN_LOCK
*)(SpinLockBuffer
+ SpinLockSize
* Index
);
1113 InitializeSpinLock (SpinLock
);
1115 ProcTokens
[Index
].Signature
= PROCEDURE_TOKEN_SIGNATURE
;
1116 ProcTokens
[Index
].SpinLock
= SpinLock
;
1117 ProcTokens
[Index
].RunningApCount
= 0;
1119 InsertTailList (&gSmmCpuPrivate
->TokenList
, &ProcTokens
[Index
].Link
);
1122 return &ProcTokens
[0].Link
;
1128 If no free token, allocate new tokens then return the free one.
1130 @param RunningApsCount The Running Aps count for this token.
1132 @retval return the first free PROCEDURE_TOKEN.
1137 IN UINT32 RunningApsCount
1140 PROCEDURE_TOKEN
*NewToken
;
1143 // If FirstFreeToken meets the end of token list, enlarge the token list.
1144 // Set FirstFreeToken to the first free token.
1146 if (gSmmCpuPrivate
->FirstFreeToken
== &gSmmCpuPrivate
->TokenList
) {
1147 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
1150 NewToken
= PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate
->FirstFreeToken
);
1151 gSmmCpuPrivate
->FirstFreeToken
= GetNextNode (&gSmmCpuPrivate
->TokenList
, gSmmCpuPrivate
->FirstFreeToken
);
1153 NewToken
->RunningApCount
= RunningApsCount
;
1154 AcquireSpinLock (NewToken
->SpinLock
);
1160 Checks status of specified AP.
1162 This function checks whether the specified AP has finished the task assigned
1163 by StartupThisAP(), and whether timeout expires.
1165 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1168 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1169 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1176 if (AcquireSpinLockOrFail (Token
)) {
1177 ReleaseSpinLock (Token
);
1181 return EFI_NOT_READY
;
1185 Schedule a procedure to run on the specified CPU.
1187 @param[in] Procedure The address of the procedure to run
1188 @param[in] CpuIndex Target CPU Index
1189 @param[in,out] ProcArguments The parameter to pass to the procedure
1190 @param[in] Token This is an optional parameter that allows the caller to execute the
1191 procedure in a blocking or non-blocking fashion. If it is NULL the
1192 call is blocking, and the call will not return until the AP has
1193 completed the procedure. If the token is not NULL, the call will
1194 return immediately. The caller can check whether the procedure has
1195 completed with CheckOnProcedure or WaitForProcedure.
1196 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1197 execution of Procedure, either for blocking or non-blocking mode.
1198 Zero means infinity. If the timeout expires before all APs return
1199 from Procedure, then Procedure on the failed APs is terminated. If
1200 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1201 If the timeout expires in non-blocking mode, the timeout determined
1202 can be through CheckOnProcedure or WaitForProcedure.
1203 Note that timeout support is optional. Whether an implementation
1204 supports this feature can be determined via the Attributes data
1206 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1207 by Procedure when it completes execution on the target AP, or with
1208 EFI_TIMEOUT if the Procedure fails to complete within the optional
1209 timeout. The implementation will update this variable with
1210 EFI_NOT_READY prior to starting Procedure on the target AP.
1212 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1213 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1214 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1215 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1216 @retval EFI_SUCCESS The procedure has been successfully scheduled
1220 InternalSmmStartupThisAp (
1221 IN EFI_AP_PROCEDURE2 Procedure
,
1223 IN OUT VOID
*ProcArguments OPTIONAL
,
1224 IN MM_COMPLETION
*Token
,
1225 IN UINTN TimeoutInMicroseconds
,
1226 IN OUT EFI_STATUS
*CpuStatus
1229 PROCEDURE_TOKEN
*ProcToken
;
1231 if (CpuIndex
>= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
) {
1232 DEBUG ((DEBUG_ERROR
, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex
, gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
));
1233 return EFI_INVALID_PARAMETER
;
1236 if (CpuIndex
== gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1237 DEBUG ((DEBUG_ERROR
, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex
));
1238 return EFI_INVALID_PARAMETER
;
1241 if (gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
== INVALID_APIC_ID
) {
1242 return EFI_INVALID_PARAMETER
;
1245 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
1246 if (mSmmMpSyncData
->EffectiveSyncMode
== SmmCpuSyncModeTradition
) {
1247 DEBUG ((DEBUG_ERROR
, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex
));
1250 return EFI_INVALID_PARAMETER
;
1253 if (gSmmCpuPrivate
->Operation
[CpuIndex
] == SmmCpuRemove
) {
1254 if (!FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1255 DEBUG ((DEBUG_ERROR
, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex
));
1258 return EFI_INVALID_PARAMETER
;
1261 if ((TimeoutInMicroseconds
!= 0) && ((mSmmMp
.Attributes
& EFI_MM_MP_TIMEOUT_SUPPORTED
) == 0)) {
1262 return EFI_INVALID_PARAMETER
;
1265 if (Procedure
== NULL
) {
1266 return EFI_INVALID_PARAMETER
;
1269 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1271 mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
= Procedure
;
1272 mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
= ProcArguments
;
1273 if (Token
!= NULL
) {
1274 if (Token
!= &mSmmStartupThisApToken
) {
1276 // When Token points to mSmmStartupThisApToken, this routine is called
1277 // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
1279 // In this case, caller wants to startup AP procedure in non-blocking
1280 // mode and cannot get the completion status from the Token because there
1281 // is no way to return the Token to caller from SmmStartupThisAp().
1282 // Caller needs to use its implementation specific way to query the completion status.
1284 // There is no need to allocate a token for such case so the 3 overheads
1286 // 1. Call AllocateTokenBuffer() when there is no free token.
1287 // 2. Get a free token from the token buffer.
1288 // 3. Call ReleaseToken() in APHandler().
1290 ProcToken
= GetFreeToken (1);
1291 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= ProcToken
;
1292 *Token
= (MM_COMPLETION
)ProcToken
->SpinLock
;
1296 mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= CpuStatus
;
1297 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
1298 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= EFI_NOT_READY
;
1301 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
1303 if (Token
== NULL
) {
1304 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1305 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  //
  // A non-zero timeout is only legal when the MP protocol advertises timeout support.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Pre-flight pass: count present APs and fail fast if any present AP is
  // pending removal or currently busy (Busy lock probed and released here).
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // For the non-blocking flavor, hand a token back to the caller so it can
  // poll or wait for completion later.
  //
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1448 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1449 If the function is defined with a type that is not compatible with
1450 the type (of the expression) pointed to by the expression that
1451 denotes the called function, the behavior is undefined.
1453 So add below wrapper function to convert between EFI_AP_PROCEDURE
1454 and EFI_AP_PROCEDURE2.
1456 Wrapper for Procedures.
1458 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1467 PROCEDURE_WRAPPER
*Wrapper
;
1470 Wrapper
->Procedure (Wrapper
->ProcedureArgument
);
1476 Schedule a procedure to run on the specified CPU in blocking mode.
1478 @param[in] Procedure The address of the procedure to run
1479 @param[in] CpuIndex Target CPU Index
1480 @param[in, out] ProcArguments The parameter to pass to the procedure
1482 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1483 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1484 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1485 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1486 @retval EFI_SUCCESS The procedure has been successfully scheduled
1491 SmmBlockingStartupThisAp (
1492 IN EFI_AP_PROCEDURE Procedure
,
1494 IN OUT VOID
*ProcArguments OPTIONAL
1497 PROCEDURE_WRAPPER Wrapper
;
1499 Wrapper
.Procedure
= Procedure
;
1500 Wrapper
.ProcedureArgument
= ProcArguments
;
1503 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1505 return InternalSmmStartupThisAp (ProcedureWrapper
, CpuIndex
, &Wrapper
, NULL
, 0, NULL
);
1509 Schedule a procedure to run on the specified CPU.
1511 @param Procedure The address of the procedure to run
1512 @param CpuIndex Target CPU Index
1513 @param ProcArguments The parameter to pass to the procedure
1515 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1516 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1517 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1518 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1519 @retval EFI_SUCCESS The procedure has been successfully scheduled
1525 IN EFI_AP_PROCEDURE Procedure
,
1527 IN OUT VOID
*ProcArguments OPTIONAL
1530 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].Procedure
= Procedure
;
1531 gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
].ProcedureArgument
= ProcArguments
;
1534 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1536 return InternalSmmStartupThisAp (
1539 &gSmmCpuPrivate
->ApWrapperFunc
[CpuIndex
],
1540 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp
) ? NULL
: &mSmmStartupThisApToken
,
1547 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1548 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1550 NOTE: It might not be appreciated in runtime since it might
1551 conflict with OS debugging facilities. Turn them off in RELEASE.
1553 @param CpuIndex CPU Index
1562 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1564 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1565 ASSERT (CpuIndex
< mMaxNumberOfCpus
);
1566 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1567 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1568 AsmWriteDr6 (CpuSaveState
->x86
._DR6
);
1569 AsmWriteDr7 (CpuSaveState
->x86
._DR7
);
1571 AsmWriteDr6 ((UINTN
)CpuSaveState
->x64
._DR6
);
1572 AsmWriteDr7 ((UINTN
)CpuSaveState
->x64
._DR7
);
1578 This function restores DR6 & DR7 to SMM save state.
1580 NOTE: It might not be appreciated in runtime since it might
1581 conflict with OS debugging facilities. Turn them off in RELEASE.
1583 @param CpuIndex CPU Index
1592 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1594 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1595 ASSERT (CpuIndex
< mMaxNumberOfCpus
);
1596 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1597 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1598 CpuSaveState
->x86
._DR7
= (UINT32
)AsmReadDr7 ();
1599 CpuSaveState
->x86
._DR6
= (UINT32
)AsmReadDr6 ();
1601 CpuSaveState
->x64
._DR7
= AsmReadDr7 ();
1602 CpuSaveState
->x64
._DR6
= AsmReadDr6 ();
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            // (first CPU to swap -1 -> its own index wins).
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1786 Allocate buffer for SpinLock and Wrapper function buffer.
1790 InitializeDataForMmMp (
1794 gSmmCpuPrivate
->ApWrapperFunc
= AllocatePool (sizeof (PROCEDURE_WRAPPER
) * gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
);
1795 ASSERT (gSmmCpuPrivate
->ApWrapperFunc
!= NULL
);
1797 InitializeListHead (&gSmmCpuPrivate
->TokenList
);
1799 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
/**
  Allocate buffer for all semaphores and spin locks.

  Carves one page-aligned allocation into the global semaphore set followed by
  the per-CPU semaphore arrays.  Each semaphore/lock occupies its own
  cache-aligned slot (size from GetSpinLockProperties()) to avoid false
  sharing between processors.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize  = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  //
  // Slot count is derived from the number of pointer fields in each
  // semaphore structure (sizeof struct / sizeof pointer).
  //
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  //
  // Global semaphores: one slot each, laid out back-to-back.
  //
  SemaphoreAddr                                   = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                 = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  //
  // Per-CPU semaphores: each field is an array of ProcessorCount slots,
  // starting right after the global block.
  //
  SemaphoreAddr                          = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  //
  // Publish convenience aliases used by the page-fault and code-access-check paths.
  //
  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
/**
  Initialize un-cacheable data.

  Lays out mSmmMpSyncData (header + CpuData array + CandidateBsp array) inside
  the single allocation made by InitializeMpServiceData(), and wires every
  semaphore/lock pointer to the slots carved out by InitializeSmmCpuSemaphores().

**/
VOID
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Point the global synchronization fields at their dedicated,
    // cache-aligned semaphore slots and reset them to the idle state.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    mSmmMpSyncData->AllApArrivedWithException = FALSE;

    //
    // Per-CPU fields: index into the per-CPU semaphore arrays by
    // mSemaphoreSize-sized strides, then reset each to idle.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value (SMM page table base) created for the SMI handlers.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    //
    // CPUID leaf not available: fall back to 36-bit physical addressing.
    //
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      (UINTN)gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
2010 Register the SMM Foundation entry point.
2012 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2013 @param SmmEntryPoint SMM Foundation EntryPoint
2015 @retval EFI_SUCCESS Successfully to register SMM foundation entry point
2021 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL
*This
,
2022 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2026 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2028 gSmmCpuPrivate
->SmmCoreEntry
= SmmEntryPoint
;
2034 Register the SMM Foundation entry point.
2036 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2037 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2038 with the related definitions of
2039 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2040 If caller may pass a value of NULL to deregister any existing
2042 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2043 run by the AP. It is an optional common mailbox between APs and
2044 the caller to share information
2046 @retval EFI_SUCCESS The Procedure has been set successfully.
2047 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2051 RegisterStartupProcedure (
2052 IN EFI_AP_PROCEDURE Procedure
,
2053 IN OUT VOID
*ProcedureArguments OPTIONAL
2056 if ((Procedure
== NULL
) && (ProcedureArguments
!= NULL
)) {
2057 return EFI_INVALID_PARAMETER
;
2060 if (mSmmMpSyncData
== NULL
) {
2061 return EFI_NOT_READY
;
2064 mSmmMpSyncData
->StartupProcedure
= Procedure
;
2065 mSmmMpSyncData
->StartupProcArgs
= ProcedureArguments
;