/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                                      gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];

SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
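//
// For reference, with the usual MtrrLib definitions (11 fixed-MTRR slots and
// 32 variable MTRRs, each variable MTRR taking a base/mask pair), the array
// above works out to 11 + 2 * 32 + 1 = 76 UINT64 slots; the exact count
// follows whatever MTRR_NUMBER_OF_FIXED_MTRR and MTRR_NUMBER_OF_VARIABLE_MTRR
// are defined to in the tree in use.
//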
/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}
/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}
/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
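//
// Taken together, the three primitives above implement the arrival counter
// protocol: each CPU checks in with ReleaseSemaphore (Counter += 1); the BSP
// freezes arrival with LockdownSemaphore (Counter = (UINT32)-1); and because
// ReleaseSemaphore refuses to increment a value of (UINT32)-1 (the increment
// would wrap to 0) and returns 0 instead, a late AP can detect from that
// return value that the BSP has already closed the rendezvous.
//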
/**
  Wait for all APs to perform an atomic compare exchange operation that releases semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}
/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}
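//
// Callers typically pass an OR of exception flags; for example,
// AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED)
// reports TRUE even if blocked or SMI-disabled processors never check in,
// since such processors cannot (or need not) respond to this SMI.
//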
/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, any CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}
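//
// The two sync windows above are driven by StartSyncTimer()/IsSyncTimerTimeout().
// As a sketch of the tuning knob involved (assuming the usual UefiCpuPkg
// timeout PCD name; check the .dec of the tree in use), a platform DSC might
// override:
//
//   gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|100000
//
// where the value is the per-round timeout in microseconds.
//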
/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR          *Psd;
  UINT64                            *SmiMtrrs;
  MTRR_SETTINGS                     *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  BiosMtrr  = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs(BiosMtrr);
}
/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
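//
// For readers tracing the MTRR rendezvous above: each BSP-side
// ReleaseAllAPs()/WaitForAllAPs(ApCount) pair matches an AP-side
// WaitForSemaphore(Run)/ReleaseSemaphore(BSP Run) pair in APHandler() below.
// The sequence is: (1) APs arrive, (2) all save MTRRs, (3) all program SMM
// MTRRs, (4) handlers run, (5) all restore MTRRs, (6) states reset,
// (7) synchronous exit. A missed pairing on either side deadlocks the SMI,
// which is why the comments insist on a sufficiently large
// PcdCpuSmmMaxSyncLoops.
//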
/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}
/**
  Create 4G PageTable in SMRAM.

  @param          ExtraPages       Additional pages to allocate besides the pages for the 4G memory map
  @param          Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Create4GPageTables (
  IN      UINTN                     ExtraPages,
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}
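//
// Size sanity check for the allocation above: the flat mapping always uses
// 5 pages (1 PDPT page plus 4 page-directory pages), and 4 directories *
// 512 entries * 2MB per PS entry covers exactly 4GB of address space.
// PagesNeeded adds one 4KB page table for every 2MB window that must be
// split to carve out non-present stack guard pages.
//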
/**
  Set memory cacheability.

  @param    PageTable              PageTable Address
  @param    Address                Memory Address to change cacheability for
  @param    Cacheability           Cacheability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability with the
  // one being set, and break a 2M page entry into pieces only when they
  // disagreed.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}
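//
// Worked example of the index math above, for Address = 0x40201000
// (1GB + 2MB + 4KB): bits 39 and up give PML4 index 0 (64-bit build only),
// bits 30-38 give PDPT index 1, bits 21-29 give PD index 1, and bits 12-20
// give PT index 1. Each index is masked with 0x1ff because every paging
// level holds 512 entries.
//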
/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
      CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
      !(*(mSmmMpSyncData->CpuData[CpuIndex].Present)) ||
      gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
    return EFI_INVALID_PARAMETER;
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}
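//
// Usage sketch (illustrative only; MyApWorker and MyContext are hypothetical
// names): from an SMI handler running on the BSP, schedule work on CPU 2 with
//
//   Status = SmmStartupThisAp (MyApWorker, 2, &MyContext);
//
// When PcdCpuSmmBlockStartupThisAp is FALSE the call returns as soon as the
// procedure is scheduled, and the caller must not reuse MyContext until the
// AP's Busy lock is observed released.
//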
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}
/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
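//
// These two functions are intended to bracket the C-level SMI handling: the
// SMI entry stub calls CpuSmmDebugEntry() before SmiRendezvous() and
// CpuSmmDebugExit() after it, so hardware breakpoints set by a debugger
// survive the round trip through SMM when PcdCpuSmmDebug is set.
//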
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;
  BOOLEAN                        XdDisableFlag;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP is already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    //
    // If the XD disable bit in MSR_IA32_MISC_ENABLE is set, clear it so the
    // XD feature is usable inside SMM, and remember to restore it on exit.
    //
    XdDisableFlag = FALSE;
    MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
    if (MiscEnableMsr.Bits.XD == 1) {
      XdDisableFlag = TRUE;
      MiscEnableMsr.Bits.XD = 0;
      AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }

    //
    // Restore the XD disable bit if it was set on entry.
    //
    if (XdDisableFlag) {
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      MiscEnableMsr.Bits.XD = 1;
      AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}
/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
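//
// Resulting layout of SemaphoreBlock (each field padded to SemaphoreSize,
// i.e. one cache line per GetSpinLockProperties(), so CPUs spinning on
// different fields never share a line):
//
//   Global:  Counter | InsideSmm | AllCpusInSync | PFLock |
//            CodeAccessCheckLock | MemoryMappedLock
//   Per-CPU: Busy[0..N-1] | Run[0..N-1] | Present[0..N-1]
//   MSR:     spin locks for MSR programming (remaining space in the block)
//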
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
    }
  }
}
/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}
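//
// Worked example of the gPhyMask computation above: if CPUID leaf 0x80000008
// reports 36 physical address bits, LShiftU64 (1, 36) - 1 yields
// 0xFFFFFFFFF; masking with (1ull << 48) - EFI_PAGE_SIZE then clears the low
// 12 bits, leaving gPhyMask = 0xFFFFFF000, which is exactly the address-bits
// field of a 4KB page table entry on a 36-bit machine.
//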
/**
  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}