2 Code for Processor S3 restoration
4 Copyright (c) 2006 - 2023, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
9 #include "PiSmmCpuDxeSmm.h"
// Communication area shared between the BSP and the APs during S3 wakeup.
// NOTE(review): the struct is truncated in this view -- leading members
// (lock, stack start/size, AP function pointer, ...) are not visible here.
// The layout presumably must match the offsets consumed by the AP startup
// assembly (MpFuncs.nasm) -- TODO confirm before changing any member.
// GDT descriptor loaded by each AP.
17 IA32_DESCRIPTOR GdtrProfile
;
// IDT descriptor loaded by each AP.
18 IA32_DESCRIPTOR IdtrProfile
;
// Flat address of the FPU-initialization routine for APs to call.
21 UINTN InitializeFloatingPointUnitsAddress
;
22 } MP_CPU_EXCHANGE_INFO
;
// Address map of the AP rendezvous (startup) code provided by assembly via
// AsmGetAddressMap(). NOTE(review): some members (flat-jump offset, code
// size, long-jump offset) are elided in this view -- TODO confirm full list.
// Start of the rendezvous funnel code to copy below 1MB.
26 UINT8
*RendezvousFunnelAddress
;
// Offset of the protected-mode entry point within the funnel code.
27 UINTN PModeEntryOffset
;
// Offset of the long-mode entry point within the funnel code.
30 UINTN LModeEntryOffset
;
32 } MP_ASSEMBLY_ADDRESS_MAP
;
// Synchronization state shared by all processors while programming the
// register tables during S3 resume.
35 // Flags used when program the register.
// Spinlock serializing MMIO register-table writes across processors.
38 volatile UINTN MemoryMappedLock
; // Spinlock used to program mmio
// One counter per logical processor, indexed by (core-first-thread + n).
39 volatile UINT32
*CoreSemaphoreCount
; // Semaphore container used to program
40 // core level semaphore.
// One counter per logical processor, indexed by (package-first-thread + n).
41 volatile UINT32
*PackageSemaphoreCount
; // Semaphore container used to program
42 // package level semaphore.
43 } PROGRAM_CPU_REGISTER_FLAGS
;
// BSP -> AP flag: APs spin on this until the BSP has finished SMM BASE
// relocation; volatile because it is polled across processors.
46 // Signal that SMM BASE relocation is complete.
48 volatile BOOLEAN mInitApsAfterSmmBaseReloc
;
// Declaration of the assembly helper that reports where the AP rendezvous
// code lives and which jump instructions need patching after relocation.
51 Get starting address and size of the rendezvous entry for APs.
52 Information for fixing a jump instruction in the code is also returned.
54 @param AddressMap Output buffer for address map information.
59 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
// Two pages just below the legacy VGA hole (0xA0000) reserved for the
// real-mode AP startup vector.
62 #define LEGACY_REGION_SIZE (2 * 0x1000)
63 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
// Module-scope state for the S3 CPU restoration flow.
// Locks/semaphores used while programming register tables.
65 PROGRAM_CPU_REGISTER_FLAGS mCpuFlags
;
// SMRAM copy of the platform's ACPI_CPU_DATA (filled by GetAcpiCpuData).
66 ACPI_CPU_DATA mAcpiCpuData
;
// Count of APs still working; decremented with interlocked ops.
67 volatile UINT32 mNumberToFinish
;
// BSP/AP exchange area placed right after the AP startup code.
68 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
// Set after S3 resume so the next SMI restores the SMM configuration.
69 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
// TRUE once SmmRestoreCpu() runs, i.e. we are on the S3 boot path.
74 BOOLEAN mSmmS3Flag
= FALSE
;
77 // Pointer to structure used during S3 Resume
79 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
// Cached PcdAcpiS3Enable; gates all S3 support in this module.
81 BOOLEAN mAcpiS3Enable
= TRUE
;
// ACPI NVS buffer holding the AP hlt-loop stub (safe code for APs).
83 UINT8
*mApHltLoopCode
= NULL
;
// Machine-code template the APs execute while parked: decrement the
// counter passed on the stack, then halt.
// NOTE(review): the trailing bytes (cli/hlt/jmp) are elided in this view.
84 UINT8 mApHltLoopCodeTemplate
[] = {
85 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
86 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
93 Sync up the MTRR values for all processors.
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
100 EFI_PHYSICAL_ADDRESS MtrrTable
107 Sync up the MTRR values for all processors.
116 MTRR_SETTINGS
*MtrrSettings
;
118 MtrrSettings
= (MTRR_SETTINGS
*)(UINTN
)MtrrTable
;
119 MtrrSetAllMtrrs (MtrrSettings
);
123 Increment semaphore by 1.
125 @param Sem IN: 32-bit unsigned integer
130 IN OUT
volatile UINT32
*Sem
133 InterlockedIncrement (Sem
);
137 Decrement the semaphore by 1 if it is not zero.
139 Performs an atomic decrement operation for semaphore.
140 The compare exchange operation must be performed using
143 @param Sem IN: 32-bit unsigned integer
148 IN OUT
volatile UINT32
*Sem
155 } while (Value
== 0 ||
156 InterlockedCompareExchange32 (
164 Read / write CR value.
166 @param[in] CrIndex The CR index which need to read/write.
167 @param[in] Read Read or write. TRUE is read.
168 @param[in,out] CrValue CR value.
170 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
176 IN OUT UINTN
*CrValue
182 *CrValue
= AsmReadCr0 ();
184 AsmWriteCr0 (*CrValue
);
190 *CrValue
= AsmReadCr2 ();
192 AsmWriteCr2 (*CrValue
);
198 *CrValue
= AsmReadCr3 ();
200 AsmWriteCr3 (*CrValue
);
206 *CrValue
= AsmReadCr4 ();
208 AsmWriteCr4 (*CrValue
);
213 return EFI_UNSUPPORTED
;
220 Initialize the CPU registers from a register table.
222 @param[in] RegisterTable The register table for this AP.
223 @param[in] ApLocation AP location info for this ap.
224 @param[in] CpuStatus CPU status info for this CPU.
225 @param[in] CpuFlags Flags data structure used when program the register.
227 @note This service could be called by BSP/APs.
230 ProgramProcessorRegister (
231 IN CPU_REGISTER_TABLE
*RegisterTable
,
232 IN EFI_CPU_PHYSICAL_LOCATION
*ApLocation
,
233 IN CPU_STATUS_INFORMATION
*CpuStatus
,
234 IN PROGRAM_CPU_REGISTER_FLAGS
*CpuFlags
237 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
240 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntryHead
;
241 volatile UINT32
*SemaphorePtr
;
243 UINT32 CurrentThread
;
245 UINTN ProcessorIndex
;
246 UINT32
*ThreadCountPerPackage
;
247 UINT8
*ThreadCountPerCore
;
252 // Traverse Register Table of this logical processor
254 RegisterTableEntryHead
= (CPU_REGISTER_TABLE_ENTRY
*)(UINTN
)RegisterTable
->RegisterTableEntry
;
256 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++) {
257 RegisterTableEntry
= &RegisterTableEntryHead
[Index
];
260 // Check the type of specified register
262 switch (RegisterTableEntry
->RegisterType
) {
264 // The specified register is Control Register
266 case ControlRegister
:
267 Status
= ReadWriteCr (RegisterTableEntry
->Index
, TRUE
, &Value
);
268 if (EFI_ERROR (Status
)) {
272 if (RegisterTableEntry
->TestThenWrite
) {
273 CurrentValue
= BitFieldRead64 (
275 RegisterTableEntry
->ValidBitStart
,
276 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1
278 if (CurrentValue
== RegisterTableEntry
->Value
) {
283 Value
= (UINTN
)BitFieldWrite64 (
285 RegisterTableEntry
->ValidBitStart
,
286 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
287 RegisterTableEntry
->Value
289 ReadWriteCr (RegisterTableEntry
->Index
, FALSE
, &Value
);
292 // The specified register is Model Specific Register
295 if (RegisterTableEntry
->TestThenWrite
) {
296 Value
= (UINTN
)AsmReadMsr64 (RegisterTableEntry
->Index
);
297 if (RegisterTableEntry
->ValidBitLength
>= 64) {
298 if (Value
== RegisterTableEntry
->Value
) {
302 CurrentValue
= BitFieldRead64 (
304 RegisterTableEntry
->ValidBitStart
,
305 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1
307 if (CurrentValue
== RegisterTableEntry
->Value
) {
314 // If this function is called to restore register setting after INIT signal,
315 // there is no need to restore MSRs in register table.
317 if (RegisterTableEntry
->ValidBitLength
>= 64) {
319 // If length is not less than 64 bits, then directly write without reading
322 RegisterTableEntry
->Index
,
323 RegisterTableEntry
->Value
327 // Set the bit section according to bit start and length
329 AsmMsrBitFieldWrite64 (
330 RegisterTableEntry
->Index
,
331 RegisterTableEntry
->ValidBitStart
,
332 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
333 RegisterTableEntry
->Value
339 // MemoryMapped operations
342 AcquireSpinLock (&CpuFlags
->MemoryMappedLock
);
343 MmioBitFieldWrite32 (
344 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
345 RegisterTableEntry
->ValidBitStart
,
346 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
347 (UINT32
)RegisterTableEntry
->Value
349 ReleaseSpinLock (&CpuFlags
->MemoryMappedLock
);
352 // Enable or disable cache
356 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
358 if (RegisterTableEntry
->Value
== 0) {
367 // Semaphore works logic like below:
369 // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
370 // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
372 // All threads (T0...Tn) waits in P() line and continues running
378 // V(0...n) V(0...n) ... V(0...n)
379 // n * P(0) n * P(1) ... n * P(n)
382 (ApLocation
!= NULL
) &&
383 (CpuStatus
->ThreadCountPerPackage
!= 0) &&
384 (CpuStatus
->ThreadCountPerCore
!= 0) &&
385 (CpuFlags
->CoreSemaphoreCount
!= NULL
) &&
386 (CpuFlags
->PackageSemaphoreCount
!= NULL
)
388 switch (RegisterTableEntry
->Value
) {
390 SemaphorePtr
= CpuFlags
->CoreSemaphoreCount
;
391 ThreadCountPerCore
= (UINT8
*)(UINTN
)CpuStatus
->ThreadCountPerCore
;
393 CurrentCore
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
+ ApLocation
->Core
;
395 // Get Offset info for the first thread in the core which current thread belongs to.
397 FirstThread
= CurrentCore
* CpuStatus
->MaxThreadCount
;
398 CurrentThread
= FirstThread
+ ApLocation
->Thread
;
401 // Different cores may have different valid threads in them. If driver maintail clearly
402 // thread index in different cores, the logic will be much complicated.
403 // Here driver just simply records the max thread number in all cores and use it as expect
404 // thread number for all cores.
405 // In below two steps logic, first current thread will Release semaphore for each thread
406 // in current core. Maybe some threads are not valid in this core, but driver don't
407 // care. Second, driver will let current thread wait semaphore for all valid threads in
408 // current core. Because only the valid threads will do release semaphore for this
409 // thread, driver here only need to wait the valid thread count.
413 // First Notify ALL THREADs in current Core that this thread is ready.
415 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
; ProcessorIndex
++) {
416 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
420 // Second, check whether all VALID THREADs (not all threads) in current core are ready.
422 for (ProcessorIndex
= 0; ProcessorIndex
< ThreadCountPerCore
[CurrentCore
]; ProcessorIndex
++) {
423 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
429 SemaphorePtr
= CpuFlags
->PackageSemaphoreCount
;
430 ThreadCountPerPackage
= (UINT32
*)(UINTN
)CpuStatus
->ThreadCountPerPackage
;
432 // Get Offset info for the first thread in the package which current thread belongs to.
434 FirstThread
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
;
436 // Get the possible threads count for current package.
438 CurrentThread
= FirstThread
+ CpuStatus
->MaxThreadCount
* ApLocation
->Core
+ ApLocation
->Thread
;
441 // Different packages may have different valid threads in them. If driver maintail clearly
442 // thread index in different packages, the logic will be much complicated.
443 // Here driver just simply records the max thread number in all packages and use it as expect
444 // thread number for all packages.
445 // In below two steps logic, first current thread will Release semaphore for each thread
446 // in current package. Maybe some threads are not valid in this package, but driver don't
447 // care. Second, driver will let current thread wait semaphore for all valid threads in
448 // current package. Because only the valid threads will do release semaphore for this
449 // thread, driver here only need to wait the valid thread count.
453 // First Notify ALL THREADS in current package that this thread is ready.
455 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
* CpuStatus
->MaxCoreCount
; ProcessorIndex
++) {
456 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
460 // Second, check whether VALID THREADS (not all threads) in current package are ready.
462 for (ProcessorIndex
= 0; ProcessorIndex
< ThreadCountPerPackage
[ApLocation
->Package
]; ProcessorIndex
++) {
463 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
482 Set Processor register for one AP.
484 @param PreSmmRegisterTable Use pre Smm register table or register table.
489 IN BOOLEAN PreSmmRegisterTable
492 CPU_FEATURE_INIT_DATA
*FeatureInitData
;
493 CPU_REGISTER_TABLE
*RegisterTable
;
494 CPU_REGISTER_TABLE
*RegisterTables
;
499 FeatureInitData
= &mAcpiCpuData
.CpuFeatureInitData
;
501 if (PreSmmRegisterTable
) {
502 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)FeatureInitData
->PreSmmInitRegisterTable
;
504 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)FeatureInitData
->RegisterTable
;
507 if (RegisterTables
== NULL
) {
511 InitApicId
= GetInitialApicId ();
512 RegisterTable
= NULL
;
513 ProcIndex
= (UINTN
)-1;
514 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
515 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
516 RegisterTable
= &RegisterTables
[Index
];
522 ASSERT (RegisterTable
!= NULL
);
524 if (FeatureInitData
->ApLocation
!= 0) {
525 ProgramProcessorRegister (
527 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)FeatureInitData
->ApLocation
+ ProcIndex
,
528 &FeatureInitData
->CpuStatus
,
532 ProgramProcessorRegister (
535 &FeatureInitData
->CpuStatus
,
542 AP initialization before then after SMBASE relocation in the S3 boot path.
552 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
557 // Count down the number with lock mechanism.
559 InterlockedDecrement (&mNumberToFinish
);
562 // Wait for BSP to signal SMM Base relocation done.
564 while (!mInitApsAfterSmmBaseReloc
) {
568 ProgramVirtualWireMode ();
569 DisableLvtInterrupts ();
574 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
576 TopOfStack
= (UINTN
)Stack
+ sizeof (Stack
);
577 TopOfStack
&= ~(UINTN
)(CPU_STACK_ALIGNMENT
- 1);
578 CopyMem ((VOID
*)(UINTN
)mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
579 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
583 Prepares startup vector for APs.
585 This function prepares startup vector for APs.
587 @param WorkingBuffer The address of the work buffer.
590 PrepareApStartupVector (
591 EFI_PHYSICAL_ADDRESS WorkingBuffer
594 EFI_PHYSICAL_ADDRESS StartupVector
;
595 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
598 // Get the address map of startup code for AP,
599 // including code size, and offset of long jump instructions to redirect.
601 ZeroMem (&AddressMap
, sizeof (AddressMap
));
602 AsmGetAddressMap (&AddressMap
);
604 StartupVector
= WorkingBuffer
;
607 // Copy AP startup code to startup vector, and then redirect the long jump
608 // instructions for mode switching.
610 CopyMem ((VOID
*)(UINTN
)StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
611 *(UINT32
*)(UINTN
)(StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
)(StartupVector
+ AddressMap
.PModeEntryOffset
);
612 if (AddressMap
.LongJumpOffset
!= 0) {
613 *(UINT32
*)(UINTN
)(StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
)(StartupVector
+ AddressMap
.LModeEntryOffset
);
617 // Get the start address of exchange data between BSP and AP.
619 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*)(UINTN
)(StartupVector
+ AddressMap
.Size
);
620 ZeroMem ((VOID
*)mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
622 CopyMem ((VOID
*)(UINTN
)&mExchangeInfo
->GdtrProfile
, (VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
623 CopyMem ((VOID
*)(UINTN
)&mExchangeInfo
->IdtrProfile
, (VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
625 mExchangeInfo
->StackStart
= (VOID
*)(UINTN
)mAcpiCpuData
.StackAddress
;
626 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
627 mExchangeInfo
->BufferStart
= (UINT32
)StartupVector
;
628 mExchangeInfo
->Cr3
= (UINT32
)(AsmReadCr3 ());
629 mExchangeInfo
->InitializeFloatingPointUnitsAddress
= (UINTN
)InitializeFloatingPointUnits
;
633 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
635 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
636 and restores MTRRs for both BSP and APs.
640 InitializeCpuBeforeRebase (
644 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
648 ProgramVirtualWireMode ();
650 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
652 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
653 ASSERT (mNumberOfCpus
<= mAcpiCpuData
.NumberOfCpus
);
655 ASSERT (mNumberOfCpus
== mAcpiCpuData
.NumberOfCpus
);
658 mNumberToFinish
= (UINT32
)(mNumberOfCpus
- 1);
659 mExchangeInfo
->ApFunction
= (VOID
*)(UINTN
)InitializeAp
;
662 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
664 mInitApsAfterSmmBaseReloc
= FALSE
;
667 // Send INIT IPI - SIPI to all APs
669 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
671 while (mNumberToFinish
> 0) {
677 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
679 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
680 data saved by normal boot path for both BSP and APs.
684 InitializeCpuAfterRebase (
688 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
689 ASSERT (mNumberOfCpus
<= mAcpiCpuData
.NumberOfCpus
);
691 ASSERT (mNumberOfCpus
== mAcpiCpuData
.NumberOfCpus
);
694 mNumberToFinish
= (UINT32
)(mNumberOfCpus
- 1);
697 // Signal that SMM base relocation is complete and to continue initialization for all APs.
699 mInitApsAfterSmmBaseReloc
= TRUE
;
702 // Must begin set register after all APs have continue their initialization.
703 // This is a requirement to support semaphore mechanism in register table.
704 // Because if semaphore's dependence type is package type, semaphore will wait
705 // for all Aps in one package finishing their tasks before set next register
706 // for all APs. If the Aps not begin its task during BSP doing its task, the
707 // BSP thread will hang because it is waiting for other Aps in the same
708 // package finishing their task.
712 while (mNumberToFinish
> 0) {
718 Restore SMM Configuration in S3 boot path.
722 RestoreSmmConfigurationInS3 (
726 if (!mAcpiS3Enable
) {
731 // Restore SMM Configuration in S3 boot path.
733 if (mRestoreSmmConfigurationInS3
) {
735 // Need make sure gSmst is correct because below function may use them.
737 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
738 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
739 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
740 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
741 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
744 // Configure SMM Code Access Check feature if available.
746 ConfigSmmCodeAccessCheck ();
748 SmmCpuFeaturesCompleteSmmReadyToLock ();
750 mRestoreSmmConfigurationInS3
= FALSE
;
755 Perform SMM initialization for all processors in the S3 boot path.
757 For a native platform, MP initialization in the S3 boot path is also performed in this function.
765 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
766 IA32_DESCRIPTOR Ia32Idtr
;
767 IA32_DESCRIPTOR X64Idtr
;
768 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
771 DEBUG ((DEBUG_INFO
, "SmmRestoreCpu()\n"));
776 // See if there is enough context to resume PEI Phase
778 if (mSmmS3ResumeState
== NULL
) {
779 DEBUG ((DEBUG_ERROR
, "No context to return to PEI Phase\n"));
783 SmmS3ResumeState
= mSmmS3ResumeState
;
784 ASSERT (SmmS3ResumeState
!= NULL
);
787 // Setup 64bit IDT in 64bit SMM env when called from 32bit PEI.
788 // Note: 64bit PEI and 32bit DXE is not a supported combination.
790 if ((SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) && (FeaturePcdGet (PcdDxeIplSwitchToLongMode
) == TRUE
)) {
792 // Save the IA32 IDT Descriptor
794 AsmReadIdtr ((IA32_DESCRIPTOR
*)&Ia32Idtr
);
797 // Setup X64 IDT table
799 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
800 X64Idtr
.Base
= (UINTN
)IdtEntryTable
;
801 X64Idtr
.Limit
= (UINT16
)(sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
802 AsmWriteIdtr ((IA32_DESCRIPTOR
*)&X64Idtr
);
805 // Setup the default exception handler
807 Status
= InitializeCpuExceptionHandlers (NULL
);
808 ASSERT_EFI_ERROR (Status
);
811 // Initialize Debug Agent to support source level debug
813 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
817 // Skip initialization if mAcpiCpuData is not valid
819 if (mAcpiCpuData
.NumberOfCpus
> 0) {
821 // First time microcode load and restore MTRRs
823 InitializeCpuBeforeRebase ();
827 // Make sure the gSmmBaseHobGuid existence status is the same between normal and S3 boot.
829 ASSERT (mSmmRelocated
== (BOOLEAN
)(GetFirstGuidHob (&gSmmBaseHobGuid
) != NULL
));
830 if (mSmmRelocated
!= (BOOLEAN
)(GetFirstGuidHob (&gSmmBaseHobGuid
) != NULL
)) {
833 "gSmmBaseHobGuid %a produced in normal boot but %a in S3 boot!",
834 mSmmRelocated
? "is" : "is not",
835 mSmmRelocated
? "is not" : "is"
841 // Check whether Smm Relocation is done or not.
842 // If not, will do the SmmBases Relocation here!!!
844 if (!mSmmRelocated
) {
846 // Restore SMBASE for BSP and all APs
851 // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
853 ExecuteFirstSmiInit ();
857 // Skip initialization if mAcpiCpuData is not valid
859 if (mAcpiCpuData
.NumberOfCpus
> 0) {
861 // Restore MSRs for BSP and all APs
863 InitializeCpuAfterRebase ();
867 // Set a flag to restore SMM configuration in S3 path.
869 mRestoreSmmConfigurationInS3
= TRUE
;
871 DEBUG ((DEBUG_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
872 DEBUG ((DEBUG_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
873 DEBUG ((DEBUG_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
874 DEBUG ((DEBUG_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
875 DEBUG ((DEBUG_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
878 // If SMM is in 32-bit mode or PcdDxeIplSwitchToLongMode is FALSE, then use SwitchStack() to resume PEI Phase.
879 // Note: 64bit PEI and 32bit DXE is not a supported combination.
881 if ((SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) || (FeaturePcdGet (PcdDxeIplSwitchToLongMode
) == FALSE
)) {
882 DEBUG ((DEBUG_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
885 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
886 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
887 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
888 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
893 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
895 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
896 DEBUG ((DEBUG_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
898 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
900 SaveAndSetDebugTimerInterrupt (FALSE
);
902 // Restore IA32 IDT table
904 AsmWriteIdtr ((IA32_DESCRIPTOR
*)&Ia32Idtr
);
906 SmmS3ResumeState
->ReturnCs
,
907 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
908 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
909 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
910 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
915 // Can not resume PEI Phase
917 DEBUG ((DEBUG_ERROR
, "No context to return to PEI Phase\n"));
922 Initialize SMM S3 resume state structure used during S3 Resume.
924 @param[in] Cr3 The base address of the page tables to use in SMM.
928 InitSmmS3ResumeState (
933 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
934 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
935 EFI_PHYSICAL_ADDRESS Address
;
938 if (!mAcpiS3Enable
) {
942 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
943 if (GuidHob
== NULL
) {
946 "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
948 &gEfiAcpiVariableGuid
952 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*)GET_GUID_HOB_DATA (GuidHob
);
954 DEBUG ((DEBUG_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
955 DEBUG ((DEBUG_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
957 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
958 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
960 mSmmS3ResumeState
= SmmS3ResumeState
;
961 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
963 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
965 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
966 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
967 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
968 SmmS3ResumeState
->SmmS3StackSize
= 0;
971 SmmS3ResumeState
->SmmS3Cr0
= mSmmCr0
;
972 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
973 SmmS3ResumeState
->SmmS3Cr4
= mSmmCr4
;
975 if (sizeof (UINTN
) == sizeof (UINT64
)) {
976 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
979 if (sizeof (UINTN
) == sizeof (UINT32
)) {
980 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
984 // Patch SmmS3ResumeState->SmmS3Cr3
990 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
991 // protected mode on S3 path
993 Address
= BASE_4GB
- 1;
994 Status
= gBS
->AllocatePages (
997 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
1000 ASSERT_EFI_ERROR (Status
);
1001 mApHltLoopCode
= (UINT8
*)(UINTN
)Address
;
1005 Copy register table from non-SMRAM into SMRAM.
1007 @param[in] DestinationRegisterTableList Points to destination register table.
1008 @param[in] SourceRegisterTableList Points to source register table.
1009 @param[in] NumberOfCpus Number of CPUs.
1014 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
1015 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
1016 IN UINT32 NumberOfCpus
1020 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
1022 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
1023 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
1024 if (DestinationRegisterTableList
[Index
].TableLength
!= 0) {
1025 DestinationRegisterTableList
[Index
].AllocatedSize
= DestinationRegisterTableList
[Index
].TableLength
* sizeof (CPU_REGISTER_TABLE_ENTRY
);
1026 RegisterTableEntry
= AllocateCopyPool (
1027 DestinationRegisterTableList
[Index
].AllocatedSize
,
1028 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
1030 ASSERT (RegisterTableEntry
!= NULL
);
1031 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
1037 Check whether the register table is empty or not.
1039 @param[in] RegisterTable Point to the register table.
1040 @param[in] NumberOfCpus Number of CPUs.
1042 @retval TRUE The register table is empty.
1043 @retval FALSE The register table is not empty.
1046 IsRegisterTableEmpty (
1047 IN CPU_REGISTER_TABLE
*RegisterTable
,
1048 IN UINT32 NumberOfCpus
1053 if (RegisterTable
!= NULL
) {
1054 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
1055 if (RegisterTable
[Index
].TableLength
!= 0) {
1065 Copy the data used to initialize processor register into SMRAM.
1067 @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.
1068 @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.
1072 CopyCpuFeatureInitDatatoSmram (
1073 IN OUT CPU_FEATURE_INIT_DATA
*CpuFeatureInitDataDst
,
1074 IN CPU_FEATURE_INIT_DATA
*CpuFeatureInitDataSrc
1077 CPU_STATUS_INFORMATION
*CpuStatus
;
1079 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataSrc
->PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
)) {
1080 CpuFeatureInitDataDst
->PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
1081 ASSERT (CpuFeatureInitDataDst
->PreSmmInitRegisterTable
!= 0);
1084 (CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataDst
->PreSmmInitRegisterTable
,
1085 (CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataSrc
->PreSmmInitRegisterTable
,
1086 mAcpiCpuData
.NumberOfCpus
1090 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataSrc
->RegisterTable
, mAcpiCpuData
.NumberOfCpus
)) {
1091 CpuFeatureInitDataDst
->RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
1092 ASSERT (CpuFeatureInitDataDst
->RegisterTable
!= 0);
1095 (CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataDst
->RegisterTable
,
1096 (CPU_REGISTER_TABLE
*)(UINTN
)CpuFeatureInitDataSrc
->RegisterTable
,
1097 mAcpiCpuData
.NumberOfCpus
1101 CpuStatus
= &CpuFeatureInitDataDst
->CpuStatus
;
1102 CopyMem (CpuStatus
, &CpuFeatureInitDataSrc
->CpuStatus
, sizeof (CPU_STATUS_INFORMATION
));
1104 if (CpuFeatureInitDataSrc
->CpuStatus
.ThreadCountPerPackage
!= 0) {
1105 CpuStatus
->ThreadCountPerPackage
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1106 sizeof (UINT32
) * CpuStatus
->PackageCount
,
1107 (UINT32
*)(UINTN
)CpuFeatureInitDataSrc
->CpuStatus
.ThreadCountPerPackage
1109 ASSERT (CpuStatus
->ThreadCountPerPackage
!= 0);
1112 if (CpuFeatureInitDataSrc
->CpuStatus
.ThreadCountPerCore
!= 0) {
1113 CpuStatus
->ThreadCountPerCore
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1114 sizeof (UINT8
) * (CpuStatus
->PackageCount
* CpuStatus
->MaxCoreCount
),
1115 (UINT32
*)(UINTN
)CpuFeatureInitDataSrc
->CpuStatus
.ThreadCountPerCore
1117 ASSERT (CpuStatus
->ThreadCountPerCore
!= 0);
1120 if (CpuFeatureInitDataSrc
->ApLocation
!= 0) {
1121 CpuFeatureInitDataDst
->ApLocation
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1122 mAcpiCpuData
.NumberOfCpus
* sizeof (EFI_CPU_PHYSICAL_LOCATION
),
1123 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)CpuFeatureInitDataSrc
->ApLocation
1125 ASSERT (CpuFeatureInitDataDst
->ApLocation
!= 0);
1138 ACPI_CPU_DATA
*AcpiCpuData
;
1139 IA32_DESCRIPTOR
*Gdtr
;
1140 IA32_DESCRIPTOR
*Idtr
;
1143 VOID
*MachineCheckHandlerForAp
;
1144 CPU_STATUS_INFORMATION
*CpuStatus
;
1146 if (!mAcpiS3Enable
) {
1151 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1153 mAcpiCpuData
.NumberOfCpus
= 0;
1156 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1158 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
1159 if (AcpiCpuData
== 0) {
1164 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1166 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
1168 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
1169 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
1171 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
1173 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
1174 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
1176 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
1178 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
1179 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
1181 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
1184 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1186 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
1187 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
1189 GdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
1190 ASSERT (GdtForAp
!= NULL
);
1191 IdtForAp
= (VOID
*)((UINTN
)GdtForAp
+ (Gdtr
->Limit
+ 1));
1192 MachineCheckHandlerForAp
= (VOID
*)((UINTN
)IdtForAp
+ (Idtr
->Limit
+ 1));
1194 CopyMem (GdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
1195 CopyMem (IdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
1196 CopyMem (MachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
1198 Gdtr
->Base
= (UINTN
)GdtForAp
;
1199 Idtr
->Base
= (UINTN
)IdtForAp
;
1200 mAcpiCpuData
.ApMachineCheckHandlerBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)MachineCheckHandlerForAp
;
1202 ZeroMem (&mAcpiCpuData
.CpuFeatureInitData
, sizeof (CPU_FEATURE_INIT_DATA
));
1204 if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume
)) {
1206 // If the CPU features will not be initialized by CpuFeaturesPei module during
1207 // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
1208 // which will be consumed in SmmRestoreCpu during next S3 resume.
1210 CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData
.CpuFeatureInitData
, &AcpiCpuData
->CpuFeatureInitData
);
1212 CpuStatus
= &mAcpiCpuData
.CpuFeatureInitData
.CpuStatus
;
1214 mCpuFlags
.CoreSemaphoreCount
= AllocateZeroPool (
1215 sizeof (UINT32
) * CpuStatus
->PackageCount
*
1216 CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
1218 ASSERT (mCpuFlags
.CoreSemaphoreCount
!= NULL
);
1220 mCpuFlags
.PackageSemaphoreCount
= AllocateZeroPool (
1221 sizeof (UINT32
) * CpuStatus
->PackageCount
*
1222 CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
1224 ASSERT (mCpuFlags
.PackageSemaphoreCount
!= NULL
);
1226 InitializeSpinLock ((SPIN_LOCK
*)&mCpuFlags
.MemoryMappedLock
);
1231 Get ACPI S3 enable flag.
1235 GetAcpiS3EnableFlag (
1239 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);