2 SMM MP service implementation
4 Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
16 MTRR_SETTINGS gSmiMtrrs
;
18 SMM_DISPATCHER_MP_SYNC_DATA
*mSmmMpSyncData
= NULL
;
19 UINTN mSmmMpSyncDataSize
;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores
;
22 SPIN_LOCK
*mPFLock
= NULL
;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode
;
24 BOOLEAN mMachineCheckSupported
= FALSE
;
25 MM_COMPLETION mSmmStartupThisApToken
;
27 extern UINTN mSmmShadowStackSize
;
30 Performs an atomic compare exchange operation to get semaphore.
31 The compare exchange operation must be performed using
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
41 IN OUT
volatile UINT32
*Sem
49 InterlockedCompareExchange32 (
63 Performs an atomic compare exchange operation to release semaphore.
64 The compare exchange operation must be performed using
67 @param Sem IN: 32-bit unsigned integer
68 OUT: original integer + 1
69 @return Original integer + 1
74 IN OUT
volatile UINT32
*Sem
81 } while (Value
+ 1 != 0 &&
82 InterlockedCompareExchange32 (
91 Performs an atomic compare exchange operation to lock semaphore.
92 The compare exchange operation must be performed using
95 @param Sem IN: 32-bit unsigned integer
97 @return Original integer
102 IN OUT
volatile UINT32
*Sem
109 } while (InterlockedCompareExchange32 (
117 Wait for all APs to perform an atomic compare exchange operation to release semaphore.
119 @param NumberOfAPs AP number
129 BspIndex
= mSmmMpSyncData
->BspIndex
;
130 while (NumberOfAPs
-- > 0) {
131 WaitForSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
136 Performs an atomic compare exchange operation to release semaphore
147 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
148 if (IsPresentAp (Index
)) {
149 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[Index
].Run
);
155 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
157 @param Exceptions CPU Arrival exception flags.
159 @retval TRUE if all CPUs have checked in.
160 @retval FALSE if at least one Normal AP hasn't checked in.
164 AllCpusInSmmWithExceptions (
165 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
169 SMM_CPU_DATA_BLOCK
*CpuData
;
170 EFI_PROCESSOR_INFORMATION
*ProcessorInfo
;
172 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
174 if (*mSmmMpSyncData
->Counter
== mNumberOfCpus
) {
178 CpuData
= mSmmMpSyncData
->CpuData
;
179 ProcessorInfo
= gSmmCpuPrivate
->ProcessorInfo
;
180 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
181 if (!(*(CpuData
[Index
].Present
)) && ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
182 if (((Exceptions
& ARRIVAL_EXCEPTION_DELAYED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmDelayed
) != 0) {
185 if (((Exceptions
& ARRIVAL_EXCEPTION_BLOCKED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmBlocked
) != 0) {
188 if (((Exceptions
& ARRIVAL_EXCEPTION_SMI_DISABLED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmEnable
) != 0) {
200 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
202 @retval TRUE OS has enabled LMCE.
203 @retval FALSE OS has not enabled LMCE.
211 MSR_IA32_MCG_CAP_REGISTER McgCap
;
212 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl
;
213 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl
;
215 McgCap
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_CAP
);
216 if (McgCap
.Bits
.MCG_LMCE_P
== 0) {
220 FeatureCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL
);
221 if (FeatureCtrl
.Bits
.LmceOn
== 0) {
225 McgExtCtrl
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL
);
226 return (BOOLEAN
) (McgExtCtrl
.Bits
.LMCE_EN
== 1);
230 Return if Local machine check exception signaled.
232 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
233 delivered to only the logical processor.
235 @retval TRUE LMCE was signaled.
236 @retval FALSE LMCE was not signaled.
244 MSR_IA32_MCG_STATUS_REGISTER McgStatus
;
246 McgStatus
.Uint64
= AsmReadMsr64 (MSR_IA32_MCG_STATUS
);
247 return (BOOLEAN
) (McgStatus
.Bits
.LMCE_S
== 1);
251 Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
252 entering SMM, except SMI disabled APs.
256 SmmWaitForApArrival (
265 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
269 if (mMachineCheckSupported
) {
270 LmceEn
= IsLmceOsEnabled ();
271 LmceSignal
= IsLmceSignaled();
275 // Platform implementor should choose a timeout value appropriately:
276 // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
277 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
278 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
279 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
280 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
281 // SMI IPI), because with a buffered SMI, any CPU will enter SMM immediately after it is brought out of the blocked state.
282 // - The timeout value must be longer than longest possible IO operation in the system
286 // Sync with APs 1st timeout
288 for (Timer
= StartSyncTimer ();
289 !IsSyncTimerTimeout (Timer
) && !(LmceEn
&& LmceSignal
) &&
290 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
296 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
298 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
299 // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
300 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
301 // work while SMI handling is on-going.
302 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
303 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
304 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
305 // mode work while SMI handling is on-going.
306 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
307 // - In traditional flow, SMI disabling is discouraged.
308 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
309 // In both cases, adding SMI-disabling checking code increases overhead.
311 if (*mSmmMpSyncData
->Counter
< mNumberOfCpus
) {
313 // Send SMI IPIs to bring outside processors in
315 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
316 if (!(*(mSmmMpSyncData
->CpuData
[Index
].Present
)) && gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
317 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
322 // Sync with APs 2nd timeout.
324 for (Timer
= StartSyncTimer ();
325 !IsSyncTimerTimeout (Timer
) &&
326 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
337 Replace OS MTRR's with SMI MTRR's.
339 @param CpuIndex Processor Index
347 SmmCpuFeaturesDisableSmrr ();
350 // Replace all MTRRs registers
352 MtrrSetAllMtrrs (&gSmiMtrrs
);
356 Check whether the task has been finished by all APs.
358 @param BlockMode Whether to wait in block mode or non-block mode.
360 @retval TRUE Task has been finished by all APs.
361 @retval FALSE Task has not been finished by all APs.
365 WaitForAllAPsNotBusy (
371 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
373 // Ignore BSP and APs which not call in SMM.
375 if (!IsPresentAp(Index
)) {
380 AcquireSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
381 ReleaseSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
383 if (AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[Index
].Busy
)) {
384 ReleaseSpinLock(mSmmMpSyncData
->CpuData
[Index
].Busy
);
395 Check whether it is a present AP.
397 @param CpuIndex The AP index which calls this function.
399 @retval TRUE It's a present AP.
400 @retval FALSE This is not an AP or it is not present.
408 return ((CpuIndex
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) &&
409 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
));
413 Clean up the status flags used during executing the procedure.
415 @param CpuIndex The AP index which calls this function.
423 PROCEDURE_TOKEN
*Token
;
425 Token
= mSmmMpSyncData
->CpuData
[CpuIndex
].Token
;
427 if (InterlockedDecrement (&Token
->RunningApCount
) == 0) {
428 ReleaseSpinLock (Token
->SpinLock
);
431 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= NULL
;
435 Free the tokens in the maintained list.
444 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
446 gSmmCpuPrivate
->FirstFreeToken
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
452 @param CpuIndex BSP processor Index
453 @param SyncMode SMM MP sync mode
459 IN SMM_CPU_SYNC_MODE SyncMode
465 BOOLEAN ClearTopLevelSmiResult
;
468 ASSERT (CpuIndex
== mSmmMpSyncData
->BspIndex
);
472 // Flag BSP's presence
474 *mSmmMpSyncData
->InsideSmm
= TRUE
;
477 // Initialize Debug Agent to start source level debug in BSP handler
479 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI
, NULL
, NULL
);
482 // Mark this processor's presence
484 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
487 // Clear platform top level SMI status bit before calling SMI handlers. If
488 // we cleared it after SMI handlers are run, we would miss the SMI that
489 // occurs after SMI handlers are done and before SMI status bit is cleared.
491 ClearTopLevelSmiResult
= ClearTopLevelSmiStatus();
492 ASSERT (ClearTopLevelSmiResult
== TRUE
);
495 // Set running processor index
497 gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
= CpuIndex
;
500 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
502 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
505 // Wait for APs to arrive
507 SmmWaitForApArrival();
510 // Lock the counter down and retrieve the number of APs
512 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
513 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
516 // Wait for all APs to get ready for programming MTRRs
518 WaitForAllAPs (ApCount
);
520 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
522 // Signal all APs it's time for backup MTRRs
527 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
528 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
529 // to a large enough value to avoid this situation.
530 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
531 // We do the backup first and then set MTRR to avoid race condition for threads
534 MtrrGetAllMtrrs(&Mtrrs
);
537 // Wait for all APs to complete their MTRR saving
539 WaitForAllAPs (ApCount
);
542 // Let all processors program SMM MTRRs together
547 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
548 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
549 // to a large enough value to avoid this situation.
551 ReplaceOSMtrrs (CpuIndex
);
554 // Wait for all APs to complete their MTRR programming
556 WaitForAllAPs (ApCount
);
561 // The BUSY lock is initialized to Acquired state
563 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
566 // Perform the pre tasks
571 // Invoke SMM Foundation EntryPoint with the processor information context.
573 gSmmCpuPrivate
->SmmCoreEntry (&gSmmCpuPrivate
->SmmCoreEntryContext
);
576 // Make sure all APs have completed their pending non-blocking tasks
578 WaitForAllAPsNotBusy (TRUE
);
581 // Perform the remaining tasks
583 PerformRemainingTasks ();
586 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
587 // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
588 // will run through freely.
590 if (SyncMode
!= SmmCpuSyncModeTradition
&& !SmmCpuFeaturesNeedConfigureMtrrs()) {
593 // Lock the counter down and retrieve the number of APs
595 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
596 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
598 // Make sure all APs have their Present flag set
602 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
603 if (*(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
607 if (PresentCount
> ApCount
) {
614 // Notify all APs to exit
616 *mSmmMpSyncData
->InsideSmm
= FALSE
;
620 // Wait for all APs to complete their pending tasks
622 WaitForAllAPs (ApCount
);
624 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
626 // Signal APs to restore MTRRs
633 SmmCpuFeaturesReenableSmrr ();
634 MtrrSetAllMtrrs(&Mtrrs
);
637 // Wait for all APs to complete MTRR programming
639 WaitForAllAPs (ApCount
);
643 // Stop source level debug in BSP handler, the code below will not be
646 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI
, NULL
, NULL
);
649 // Signal APs to Reset states/semaphore for this processor
654 // Perform pending operations for hot-plug
659 // Clear the Present flag of BSP
661 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
664 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
665 // WaitForAllAPs does not depend on the Present flag.
667 WaitForAllAPs (ApCount
);
670 // Reset the tokens buffer.
675 // Reset BspIndex to -1, meaning BSP has not been elected.
677 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
678 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
682 // Allow APs to check in from this point on
684 *mSmmMpSyncData
->Counter
= 0;
685 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
691 @param CpuIndex AP processor Index.
692 @param ValidSmi Indicates that current SMI is a valid SMI or not.
693 @param SyncMode SMM MP sync mode.
700 IN SMM_CPU_SYNC_MODE SyncMode
706 EFI_STATUS ProcedureStatus
;
711 for (Timer
= StartSyncTimer ();
712 !IsSyncTimerTimeout (Timer
) &&
713 !(*mSmmMpSyncData
->InsideSmm
);
718 if (!(*mSmmMpSyncData
->InsideSmm
)) {
720 // BSP timeout in the first round
722 if (mSmmMpSyncData
->BspIndex
!= -1) {
724 // BSP Index is known
726 BspIndex
= mSmmMpSyncData
->BspIndex
;
727 ASSERT (CpuIndex
!= BspIndex
);
730 // Send SMI IPI to bring BSP in
732 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[BspIndex
].ProcessorId
);
735 // Now clock BSP for the 2nd time
737 for (Timer
= StartSyncTimer ();
738 !IsSyncTimerTimeout (Timer
) &&
739 !(*mSmmMpSyncData
->InsideSmm
);
744 if (!(*mSmmMpSyncData
->InsideSmm
)) {
746 // Give up since BSP is unable to enter SMM
747 // and signal the completion of this AP
748 WaitForSemaphore (mSmmMpSyncData
->Counter
);
753 // Don't know BSP index. Give up without sending IPI to BSP.
755 WaitForSemaphore (mSmmMpSyncData
->Counter
);
763 BspIndex
= mSmmMpSyncData
->BspIndex
;
764 ASSERT (CpuIndex
!= BspIndex
);
767 // Mark this processor's presence
769 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
771 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
773 // Notify BSP of arrival at this point
775 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
778 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
780 // Wait for the signal from BSP to backup MTRRs
782 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
787 MtrrGetAllMtrrs(&Mtrrs
);
790 // Signal BSP the completion of this AP
792 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
795 // Wait for BSP's signal to program MTRRs
797 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
800 // Replace OS MTRRs with SMI MTRRs
802 ReplaceOSMtrrs (CpuIndex
);
805 // Signal BSP the completion of this AP
807 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
812 // Wait for something to happen
814 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
817 // Check if BSP wants to exit SMM
819 if (!(*mSmmMpSyncData
->InsideSmm
)) {
824 // BUSY should be acquired by SmmStartupThisAp()
827 !AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)
831 // Invoke the scheduled procedure
833 ProcedureStatus
= (*mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
) (
834 (VOID
*)mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
836 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
837 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= ProcedureStatus
;
840 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Token
!= NULL
) {
841 ReleaseToken (CpuIndex
);
847 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
850 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
852 // Notify BSP the readiness of this AP to program MTRRs
854 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
857 // Wait for the signal from BSP to program MTRRs
859 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
864 SmmCpuFeaturesReenableSmrr ();
865 MtrrSetAllMtrrs(&Mtrrs
);
869 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
871 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
874 // Wait for the signal from BSP to Reset states/semaphore for this processor
876 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
879 // Reset states/semaphore for this processor
881 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
884 // Notify BSP the readiness of this AP to exit SMM
886 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
891 Create 4G PageTable in SMRAM.
893 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
894 @return PageTable Address
899 IN BOOLEAN Is32BitPageTable
907 UINTN High2MBoundary
;
917 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
919 // Add one more page for known good stack, then find the lower 2MB aligned address.
921 Low2MBoundary
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
) & ~(SIZE_2MB
-1);
923 // Add two more pages for known good stack and stack guard page,
924 // then find the lower 2MB aligned address.
926 High2MBoundary
= (mSmmStackArrayEnd
- mSmmStackSize
- mSmmShadowStackSize
+ EFI_PAGE_SIZE
* 2) & ~(SIZE_2MB
-1);
927 PagesNeeded
= ((High2MBoundary
- Low2MBoundary
) / SIZE_2MB
) + 1;
930 // Allocate the page table
932 PageTable
= AllocatePageTableMemory (5 + PagesNeeded
);
933 ASSERT (PageTable
!= NULL
);
935 PageTable
= (VOID
*)((UINTN
)PageTable
);
936 Pte
= (UINT64
*)PageTable
;
939 // Zero out all page table entries first
941 ZeroMem (Pte
, EFI_PAGES_TO_SIZE (1));
944 // Set Page Directory Pointers
946 for (Index
= 0; Index
< 4; Index
++) {
947 Pte
[Index
] = ((UINTN
)PageTable
+ EFI_PAGE_SIZE
* (Index
+ 1)) | mAddressEncMask
|
948 (Is32BitPageTable
? IA32_PAE_PDPTE_ATTRIBUTE_BITS
: PAGE_ATTRIBUTE_BITS
);
950 Pte
+= EFI_PAGE_SIZE
/ sizeof (*Pte
);
953 // Fill in Page Directory Entries
955 for (Index
= 0; Index
< EFI_PAGE_SIZE
* 4 / sizeof (*Pte
); Index
++) {
956 Pte
[Index
] = (Index
<< 21) | mAddressEncMask
| IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
959 Pdpte
= (UINT64
*)PageTable
;
960 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
961 Pages
= (UINTN
)PageTable
+ EFI_PAGES_TO_SIZE (5);
962 GuardPage
= mSmmStackArrayBase
+ EFI_PAGE_SIZE
;
963 for (PageIndex
= Low2MBoundary
; PageIndex
<= High2MBoundary
; PageIndex
+= SIZE_2MB
) {
964 Pte
= (UINT64
*)(UINTN
)(Pdpte
[BitFieldRead32 ((UINT32
)PageIndex
, 30, 31)] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
965 Pte
[BitFieldRead32 ((UINT32
)PageIndex
, 21, 29)] = (UINT64
)Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
967 // Fill in Page Table Entries
969 Pte
= (UINT64
*)Pages
;
970 PageAddress
= PageIndex
;
971 for (Index
= 0; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
972 if (PageAddress
== GuardPage
) {
974 // Mark the guard page as non-present
976 Pte
[Index
] = PageAddress
| mAddressEncMask
;
977 GuardPage
+= (mSmmStackSize
+ mSmmShadowStackSize
);
978 if (GuardPage
> mSmmStackArrayEnd
) {
982 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
984 PageAddress
+= EFI_PAGE_SIZE
;
986 Pages
+= EFI_PAGE_SIZE
;
990 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask
) & BIT1
) != 0) {
991 Pte
= (UINT64
*)(UINTN
)(Pdpte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
992 if ((Pte
[0] & IA32_PG_PS
) == 0) {
993 // 4K-page entries are already mapped. Just hide the first one anyway.
994 Pte
= (UINT64
*)(UINTN
)(Pte
[0] & ~mAddressEncMask
& ~(EFI_PAGE_SIZE
- 1));
995 Pte
[0] &= ~(UINT64
)IA32_PG_P
; // Hide page 0
997 // Create 4K-page entries
998 Pages
= (UINTN
)AllocatePageTableMemory (1);
1001 Pte
[0] = (UINT64
)(Pages
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
1003 Pte
= (UINT64
*)Pages
;
1005 Pte
[0] = PageAddress
| mAddressEncMask
; // Hide page 0 but present left
1006 for (Index
= 1; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
1007 PageAddress
+= EFI_PAGE_SIZE
;
1008 Pte
[Index
] = PageAddress
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
1013 return (UINT32
)(UINTN
)PageTable
;
1017 Checks whether the input token is the current used token.
1019 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1022 @retval TRUE The input token is the current used token.
1023 @retval FALSE The input token is not the current used token.
1031 PROCEDURE_TOKEN
*ProcToken
;
1033 if (Token
== NULL
) {
1037 Link
= GetFirstNode (&gSmmCpuPrivate
->TokenList
);
1039 // Only search used tokens.
1041 while (Link
!= gSmmCpuPrivate
->FirstFreeToken
) {
1042 ProcToken
= PROCEDURE_TOKEN_FROM_LINK (Link
);
1044 if (ProcToken
->SpinLock
== Token
) {
1048 Link
= GetNextNode (&gSmmCpuPrivate
->TokenList
, Link
);
1055 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1057 @return First token of the token buffer.
1060 AllocateTokenBuffer (
1065 UINT32 TokenCountPerChunk
;
1067 SPIN_LOCK
*SpinLock
;
1068 UINT8
*SpinLockBuffer
;
1069 PROCEDURE_TOKEN
*ProcTokens
;
1071 SpinLockSize
= GetSpinLockProperties ();
1073 TokenCountPerChunk
= FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk
);
1074 ASSERT (TokenCountPerChunk
!= 0);
1075 if (TokenCountPerChunk
== 0) {
1076 DEBUG ((DEBUG_ERROR
, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1079 DEBUG ((DEBUG_INFO
, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize
, TokenCountPerChunk
));
1082 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1084 SpinLockBuffer
= AllocatePool (SpinLockSize
* TokenCountPerChunk
);
1085 ASSERT (SpinLockBuffer
!= NULL
);
1087 ProcTokens
= AllocatePool (sizeof (PROCEDURE_TOKEN
) * TokenCountPerChunk
);
1088 ASSERT (ProcTokens
!= NULL
);
1090 for (Index
= 0; Index
< TokenCountPerChunk
; Index
++) {
1091 SpinLock
= (SPIN_LOCK
*)(SpinLockBuffer
+ SpinLockSize
* Index
);
1092 InitializeSpinLock (SpinLock
);
1094 ProcTokens
[Index
].Signature
= PROCEDURE_TOKEN_SIGNATURE
;
1095 ProcTokens
[Index
].SpinLock
= SpinLock
;
1096 ProcTokens
[Index
].RunningApCount
= 0;
1098 InsertTailList (&gSmmCpuPrivate
->TokenList
, &ProcTokens
[Index
].Link
);
1101 return &ProcTokens
[0].Link
;
1107 If no free token, allocate new tokens then return the free one.
1109 @param RunningApsCount The Running Aps count for this token.
1111 @retval return the first free PROCEDURE_TOKEN.
1116 IN UINT32 RunningApsCount
1119 PROCEDURE_TOKEN
*NewToken
;
1122 // If FirstFreeToken meets the end of token list, enlarge the token list.
1123 // Set FirstFreeToken to the first free token.
1125 if (gSmmCpuPrivate
->FirstFreeToken
== &gSmmCpuPrivate
->TokenList
) {
1126 gSmmCpuPrivate
->FirstFreeToken
= AllocateTokenBuffer ();
1128 NewToken
= PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate
->FirstFreeToken
);
1129 gSmmCpuPrivate
->FirstFreeToken
= GetNextNode (&gSmmCpuPrivate
->TokenList
, gSmmCpuPrivate
->FirstFreeToken
);
1131 NewToken
->RunningApCount
= RunningApsCount
;
1132 AcquireSpinLock (NewToken
->SpinLock
);
1138 Checks status of specified AP.
1140 This function checks whether the specified AP has finished the task assigned
1141 by StartupThisAP(), and whether timeout expires.
1143 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1146 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1147 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1154 if (AcquireSpinLockOrFail (Token
)) {
1155 ReleaseSpinLock (Token
);
1159 return EFI_NOT_READY
;
1163 Schedule a procedure to run on the specified CPU.
1165 @param[in] Procedure The address of the procedure to run
1166 @param[in] CpuIndex Target CPU Index
1167 @param[in,out] ProcArguments The parameter to pass to the procedure
1168 @param[in] Token This is an optional parameter that allows the caller to execute the
1169 procedure in a blocking or non-blocking fashion. If it is NULL the
1170 call is blocking, and the call will not return until the AP has
1171 completed the procedure. If the token is not NULL, the call will
1172 return immediately. The caller can check whether the procedure has
1173 completed with CheckOnProcedure or WaitForProcedure.
1174 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1175 execution of Procedure, either for blocking or non-blocking mode.
1176 Zero means infinity. If the timeout expires before all APs return
1177 from Procedure, then Procedure on the failed APs is terminated. If
1178 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1179 If the timeout expires in non-blocking mode, the timeout determined
1180 can be through CheckOnProcedure or WaitForProcedure.
1181 Note that timeout support is optional. Whether an implementation
1182 supports this feature can be determined via the Attributes data
1184 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1185 by Procedure when it completes execution on the target AP, or with
1186 EFI_TIMEOUT if the Procedure fails to complete within the optional
1187 timeout. The implementation will update this variable with
1188 EFI_NOT_READY prior to starting Procedure on the target AP.
1190 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1191 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1192 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1193 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1194 @retval EFI_SUCCESS The procedure has been successfully scheduled
1198 InternalSmmStartupThisAp (
1199 IN EFI_AP_PROCEDURE2 Procedure
,
1201 IN OUT VOID
*ProcArguments OPTIONAL
,
1202 IN MM_COMPLETION
*Token
,
1203 IN UINTN TimeoutInMicroseconds
,
1204 IN OUT EFI_STATUS
*CpuStatus
1207 PROCEDURE_TOKEN
*ProcToken
;
1209 if (CpuIndex
>= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
) {
1210 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex
, gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
));
1211 return EFI_INVALID_PARAMETER
;
1213 if (CpuIndex
== gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1214 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex
));
1215 return EFI_INVALID_PARAMETER
;
1217 if (gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
== INVALID_APIC_ID
) {
1218 return EFI_INVALID_PARAMETER
;
1220 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
1221 if (mSmmMpSyncData
->EffectiveSyncMode
== SmmCpuSyncModeTradition
) {
1222 DEBUG((DEBUG_ERROR
, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex
));
1224 return EFI_INVALID_PARAMETER
;
1226 if (gSmmCpuPrivate
->Operation
[CpuIndex
] == SmmCpuRemove
) {
1227 if (!FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1228 DEBUG((DEBUG_ERROR
, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex
));
1230 return EFI_INVALID_PARAMETER
;
1232 if ((TimeoutInMicroseconds
!= 0) && ((mSmmMp
.Attributes
& EFI_MM_MP_TIMEOUT_SUPPORTED
) == 0)) {
1233 return EFI_INVALID_PARAMETER
;
1235 if (Procedure
== NULL
) {
1236 return EFI_INVALID_PARAMETER
;
1239 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1241 mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
= Procedure
;
1242 mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
= ProcArguments
;
1243 if (Token
!= NULL
) {
1244 if (Token
!= &mSmmStartupThisApToken
) {
1246 // When Token points to mSmmStartupThisApToken, this routine is called
1247 // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
1249 // In this case, caller wants to startup AP procedure in non-blocking
1250 // mode and cannot get the completion status from the Token because there
1251 // is no way to return the Token to caller from SmmStartupThisAp().
1252 // Caller needs to use its implementation specific way to query the completion status.
1254 // There is no need to allocate a token for such case so the 3 overheads
1256 // 1. Call AllocateTokenBuffer() when there is no free token.
1257 // 2. Get a free token from the token buffer.
1258 // 3. Call ReleaseToken() in APHandler().
1260 ProcToken
= GetFreeToken (1);
1261 mSmmMpSyncData
->CpuData
[CpuIndex
].Token
= ProcToken
;
1262 *Token
= (MM_COMPLETION
)ProcToken
->SpinLock
;
1265 mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= CpuStatus
;
1266 if (mSmmMpSyncData
->CpuData
[CpuIndex
].Status
!= NULL
) {
1267 *mSmmMpSyncData
->CpuData
[CpuIndex
].Status
= EFI_NOT_READY
;
1270 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
1272 if (Token
== NULL
) {
1273 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1274 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  //
  // A non-zero timeout is only valid when the MP capability set advertises
  // timeout support.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Pre-flight pass: count present APs and verify every present AP is neither
  // pending removal nor currently busy. Acquire-then-release of each Busy lock
  // is a non-destructive "is it free right now" probe.
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount ++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // For non-blocking mode, hand the caller a completion token (its SpinLock
  // doubles as the MM_COMPLETION handle).
  //
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // needing to check failure again.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Publish the procedure/argument/status slots for every present AP before
  // any AP is released to run.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token   = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status    = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add below wrapper function to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer              Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  //
  // Unpack the original EFI_AP_PROCEDURE and its argument, then invoke it
  // through its own (compatible) function type.
  //
  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}
/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  //
  // Stack-allocated wrapper is safe here because the call below is blocking
  // (Token == NULL): it does not return until the AP has finished.
  //
  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}
/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  //
  // Unlike the blocking variant, the wrapper must outlive this call when the
  // dispatch is non-blocking, so use the per-CPU slot in gSmmCpuPrivate
  // instead of a stack variable.
  //
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  // PcdCpuSmmBlockStartupThisAp selects blocking (NULL token) vs.
  // non-blocking (module-global token) behavior.
  //
  return InternalSmmStartupThisAp (
    ProcedureWrapper,
    CpuIndex,
    &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
    FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
    0,
    NULL
    );
}
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  //
  // Only active when the PcdCpuSmmDebug feature flag is set; otherwise this
  // is a no-op.
  //
  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    //
    // The save-state layout differs between 32-bit and 64-bit operation, so
    // pick the matching view of the DR6/DR7 fields.
    //
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}
/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  //
  // Mirror of CpuSmmDebugEntry: write the live DR6/DR7 values back into the
  // per-CPU SMM save state so they are restored on RSM.
  //
  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method:
            // first CPU to swap BspIndex from -1 to its own index wins.
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    //
    // The Run semaphore must be fully consumed by now; a non-zero value would
    // indicate an unbalanced release.
    //
    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  //
  // One PROCEDURE_WRAPPER slot per CPU, used by SmmStartupThisAp() for
  // non-blocking dispatch (the wrapper must outlive the caller's stack frame).
  //
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}
1769 Allocate buffer for all semaphores and spin locks.
1773 InitializeSmmCpuSemaphores (
1777 UINTN ProcessorCount
;
1779 UINTN GlobalSemaphoresSize
;
1780 UINTN CpuSemaphoresSize
;
1781 UINTN SemaphoreSize
;
1783 UINTN
*SemaphoreBlock
;
1784 UINTN SemaphoreAddr
;
1786 SemaphoreSize
= GetSpinLockProperties ();
1787 ProcessorCount
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
1788 GlobalSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_GLOBAL
) / sizeof (VOID
*)) * SemaphoreSize
;
1789 CpuSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_CPU
) / sizeof (VOID
*)) * ProcessorCount
* SemaphoreSize
;
1790 TotalSize
= GlobalSemaphoresSize
+ CpuSemaphoresSize
;
1791 DEBUG((EFI_D_INFO
, "One Semaphore Size = 0x%x\n", SemaphoreSize
));
1792 DEBUG((EFI_D_INFO
, "Total Semaphores Size = 0x%x\n", TotalSize
));
1793 Pages
= EFI_SIZE_TO_PAGES (TotalSize
);
1794 SemaphoreBlock
= AllocatePages (Pages
);
1795 ASSERT (SemaphoreBlock
!= NULL
);
1796 ZeroMem (SemaphoreBlock
, TotalSize
);
1798 SemaphoreAddr
= (UINTN
)SemaphoreBlock
;
1799 mSmmCpuSemaphores
.SemaphoreGlobal
.Counter
= (UINT32
*)SemaphoreAddr
;
1800 SemaphoreAddr
+= SemaphoreSize
;
1801 mSmmCpuSemaphores
.SemaphoreGlobal
.InsideSmm
= (BOOLEAN
*)SemaphoreAddr
;
1802 SemaphoreAddr
+= SemaphoreSize
;
1803 mSmmCpuSemaphores
.SemaphoreGlobal
.AllCpusInSync
= (BOOLEAN
*)SemaphoreAddr
;
1804 SemaphoreAddr
+= SemaphoreSize
;
1805 mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
= (SPIN_LOCK
*)SemaphoreAddr
;
1806 SemaphoreAddr
+= SemaphoreSize
;
1807 mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
1808 = (SPIN_LOCK
*)SemaphoreAddr
;
1809 SemaphoreAddr
+= SemaphoreSize
;
1811 SemaphoreAddr
= (UINTN
)SemaphoreBlock
+ GlobalSemaphoresSize
;
1812 mSmmCpuSemaphores
.SemaphoreCpu
.Busy
= (SPIN_LOCK
*)SemaphoreAddr
;
1813 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1814 mSmmCpuSemaphores
.SemaphoreCpu
.Run
= (UINT32
*)SemaphoreAddr
;
1815 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1816 mSmmCpuSemaphores
.SemaphoreCpu
.Present
= (BOOLEAN
*)SemaphoreAddr
;
1818 mPFLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
;
1819 mConfigSmmCodeAccessCheckLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
;
1821 mSemaphoreSize
= SemaphoreSize
;
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Point the sync-data semaphore fields at the cache-line-isolated slots
    // carved out by InitializeSmmCpuSemaphores(), then reset their values.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    //
    // Per-CPU semaphores: slot i lives mSemaphoreSize * i past the base of
    // the corresponding per-CPU region.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return                   Cr3 value of the page table created for SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    //
    // CPUID leaf not available: fall back to the 36-bit legacy PAE limit.
    //
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  gPhyMask  = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  // (each CPU gets its own SMBASE, SMI stack slice, and GDT/TSS copy).
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      (UINTN)gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
/**
  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval EFI_SUCCESS            Successfully to register SMM foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}
1994 Register the SMM Foundation entry point.
1996 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
1997 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
1998 with the related definitions of
1999 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2000 If caller may pass a value of NULL to deregister any existing
2002 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2003 run by the AP. It is an optional common mailbox between APs and
2004 the caller to share information
2006 @retval EFI_SUCCESS The Procedure has been set successfully.
2007 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2011 RegisterStartupProcedure (
2012 IN EFI_AP_PROCEDURE Procedure
,
2013 IN OUT VOID
*ProcedureArguments OPTIONAL
2016 if (Procedure
== NULL
&& ProcedureArguments
!= NULL
) {
2017 return EFI_INVALID_PARAMETER
;
2019 if (mSmmMpSyncData
== NULL
) {
2020 return EFI_NOT_READY
;
2023 mSmmMpSyncData
->StartupProcedure
= Procedure
;
2024 mSmmMpSyncData
->StartupProcArgs
= ProcedureArguments
;