/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/
9 #include "PiSmmCpuDxeSmm.h"
17 IA32_DESCRIPTOR GdtrProfile
;
18 IA32_DESCRIPTOR IdtrProfile
;
21 UINTN InitializeFloatingPointUnitsAddress
;
22 } MP_CPU_EXCHANGE_INFO
;
26 UINT8
*RendezvousFunnelAddress
;
27 UINTN PModeEntryOffset
;
30 UINTN LModeEntryOffset
;
32 } MP_ASSEMBLY_ADDRESS_MAP
;
35 // Flags used when program the register.
38 volatile UINTN MemoryMappedLock
; // Spinlock used to program mmio
39 volatile UINT32
*CoreSemaphoreCount
; // Semaphore container used to program
40 // core level semaphore.
41 volatile UINT32
*PackageSemaphoreCount
; // Semaphore container used to program
42 // package level semaphore.
43 } PROGRAM_CPU_REGISTER_FLAGS
;
46 // Signal that SMM BASE relocation is complete.
48 volatile BOOLEAN mInitApsAfterSmmBaseReloc
;
51 Get starting address and size of the rendezvous entry for APs.
52 Information for fixing a jump instruction in the code is also returned.
54 @param AddressMap Output buffer for address map information.
59 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
62 #define LEGACY_REGION_SIZE (2 * 0x1000)
63 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
65 PROGRAM_CPU_REGISTER_FLAGS mCpuFlags
;
66 ACPI_CPU_DATA mAcpiCpuData
;
67 volatile UINT32 mNumberToFinish
;
68 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
69 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
74 BOOLEAN mSmmS3Flag
= FALSE
;
77 // Pointer to structure used during S3 Resume
79 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
81 BOOLEAN mAcpiS3Enable
= TRUE
;
83 UINT8
*mApHltLoopCode
= NULL
;
84 UINT8 mApHltLoopCodeTemplate
[] = {
85 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
86 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
93 Sync up the MTRR values for all processors.
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
100 EFI_PHYSICAL_ADDRESS MtrrTable
106 Sync up the MTRR values for all processors.
115 MTRR_SETTINGS
*MtrrSettings
;
117 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
118 MtrrSetAllMtrrs (MtrrSettings
);
122 Increment semaphore by 1.
124 @param Sem IN: 32-bit unsigned integer
129 IN OUT
volatile UINT32
*Sem
132 InterlockedIncrement (Sem
);
136 Decrement the semaphore by 1 if it is not zero.
138 Performs an atomic decrement operation for semaphore.
139 The compare exchange operation must be performed using
142 @param Sem IN: 32-bit unsigned integer
147 IN OUT
volatile UINT32
*Sem
154 } while (Value
== 0 ||
155 InterlockedCompareExchange32 (
163 Read / write CR value.
165 @param[in] CrIndex The CR index which need to read/write.
166 @param[in] Read Read or write. TRUE is read.
167 @param[in,out] CrValue CR value.
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
175 IN OUT UINTN
*CrValue
181 *CrValue
= AsmReadCr0 ();
183 AsmWriteCr0 (*CrValue
);
188 *CrValue
= AsmReadCr2 ();
190 AsmWriteCr2 (*CrValue
);
195 *CrValue
= AsmReadCr3 ();
197 AsmWriteCr3 (*CrValue
);
202 *CrValue
= AsmReadCr4 ();
204 AsmWriteCr4 (*CrValue
);
208 return EFI_UNSUPPORTED
;;
215 Initialize the CPU registers from a register table.
217 @param[in] RegisterTable The register table for this AP.
218 @param[in] ApLocation AP location info for this ap.
219 @param[in] CpuStatus CPU status info for this CPU.
220 @param[in] CpuFlags Flags data structure used when program the register.
222 @note This service could be called by BSP/APs.
225 ProgramProcessorRegister (
226 IN CPU_REGISTER_TABLE
*RegisterTable
,
227 IN EFI_CPU_PHYSICAL_LOCATION
*ApLocation
,
228 IN CPU_STATUS_INFORMATION
*CpuStatus
,
229 IN PROGRAM_CPU_REGISTER_FLAGS
*CpuFlags
232 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
235 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntryHead
;
236 volatile UINT32
*SemaphorePtr
;
238 UINT32 CurrentThread
;
240 UINTN ProcessorIndex
;
241 UINT32
*ThreadCountPerPackage
;
242 UINT8
*ThreadCountPerCore
;
247 // Traverse Register Table of this logical processor
249 RegisterTableEntryHead
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
251 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++) {
253 RegisterTableEntry
= &RegisterTableEntryHead
[Index
];
256 // Check the type of specified register
258 switch (RegisterTableEntry
->RegisterType
) {
260 // The specified register is Control Register
262 case ControlRegister
:
263 Status
= ReadWriteCr (RegisterTableEntry
->Index
, TRUE
, &Value
);
264 if (EFI_ERROR (Status
)) {
267 if (RegisterTableEntry
->TestThenWrite
) {
268 CurrentValue
= BitFieldRead64 (
270 RegisterTableEntry
->ValidBitStart
,
271 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1
273 if (CurrentValue
== RegisterTableEntry
->Value
) {
277 Value
= (UINTN
) BitFieldWrite64 (
279 RegisterTableEntry
->ValidBitStart
,
280 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
281 RegisterTableEntry
->Value
283 ReadWriteCr (RegisterTableEntry
->Index
, FALSE
, &Value
);
286 // The specified register is Model Specific Register
289 if (RegisterTableEntry
->TestThenWrite
) {
290 Value
= (UINTN
)AsmReadMsr64 (RegisterTableEntry
->Index
);
291 if (RegisterTableEntry
->ValidBitLength
>= 64) {
292 if (Value
== RegisterTableEntry
->Value
) {
296 CurrentValue
= BitFieldRead64 (
298 RegisterTableEntry
->ValidBitStart
,
299 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1
301 if (CurrentValue
== RegisterTableEntry
->Value
) {
308 // If this function is called to restore register setting after INIT signal,
309 // there is no need to restore MSRs in register table.
311 if (RegisterTableEntry
->ValidBitLength
>= 64) {
313 // If length is not less than 64 bits, then directly write without reading
316 RegisterTableEntry
->Index
,
317 RegisterTableEntry
->Value
321 // Set the bit section according to bit start and length
323 AsmMsrBitFieldWrite64 (
324 RegisterTableEntry
->Index
,
325 RegisterTableEntry
->ValidBitStart
,
326 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
327 RegisterTableEntry
->Value
332 // MemoryMapped operations
335 AcquireSpinLock (&CpuFlags
->MemoryMappedLock
);
336 MmioBitFieldWrite32 (
337 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
338 RegisterTableEntry
->ValidBitStart
,
339 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
340 (UINT32
)RegisterTableEntry
->Value
342 ReleaseSpinLock (&CpuFlags
->MemoryMappedLock
);
345 // Enable or disable cache
349 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
351 if (RegisterTableEntry
->Value
== 0) {
359 // Semaphore works logic like below:
361 // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
362 // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
364 // All threads (T0...Tn) waits in P() line and continues running
370 // V(0...n) V(0...n) ... V(0...n)
371 // n * P(0) n * P(1) ... n * P(n)
374 (ApLocation
!= NULL
) &&
375 (CpuStatus
->ThreadCountPerPackage
!= 0) &&
376 (CpuStatus
->ThreadCountPerCore
!= 0) &&
377 (CpuFlags
->CoreSemaphoreCount
!= NULL
) &&
378 (CpuFlags
->PackageSemaphoreCount
!= NULL
)
380 switch (RegisterTableEntry
->Value
) {
382 SemaphorePtr
= CpuFlags
->CoreSemaphoreCount
;
383 ThreadCountPerCore
= (UINT8
*)(UINTN
)CpuStatus
->ThreadCountPerCore
;
385 CurrentCore
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
+ ApLocation
->Core
;
387 // Get Offset info for the first thread in the core which current thread belongs to.
389 FirstThread
= CurrentCore
* CpuStatus
->MaxThreadCount
;
390 CurrentThread
= FirstThread
+ ApLocation
->Thread
;
393 // Different cores may have different valid threads in them. If driver maintail clearly
394 // thread index in different cores, the logic will be much complicated.
395 // Here driver just simply records the max thread number in all cores and use it as expect
396 // thread number for all cores.
397 // In below two steps logic, first current thread will Release semaphore for each thread
398 // in current core. Maybe some threads are not valid in this core, but driver don't
399 // care. Second, driver will let current thread wait semaphore for all valid threads in
400 // current core. Because only the valid threads will do release semaphore for this
401 // thread, driver here only need to wait the valid thread count.
405 // First Notify ALL THREADs in current Core that this thread is ready.
407 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
; ProcessorIndex
++) {
408 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
411 // Second, check whether all VALID THREADs (not all threads) in current core are ready.
413 for (ProcessorIndex
= 0; ProcessorIndex
< ThreadCountPerCore
[CurrentCore
]; ProcessorIndex
++) {
414 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
419 SemaphorePtr
= CpuFlags
->PackageSemaphoreCount
;
420 ThreadCountPerPackage
= (UINT32
*)(UINTN
)CpuStatus
->ThreadCountPerPackage
;
422 // Get Offset info for the first thread in the package which current thread belongs to.
424 FirstThread
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
;
426 // Get the possible threads count for current package.
428 CurrentThread
= FirstThread
+ CpuStatus
->MaxThreadCount
* ApLocation
->Core
+ ApLocation
->Thread
;
431 // Different packages may have different valid threads in them. If driver maintail clearly
432 // thread index in different packages, the logic will be much complicated.
433 // Here driver just simply records the max thread number in all packages and use it as expect
434 // thread number for all packages.
435 // In below two steps logic, first current thread will Release semaphore for each thread
436 // in current package. Maybe some threads are not valid in this package, but driver don't
437 // care. Second, driver will let current thread wait semaphore for all valid threads in
438 // current package. Because only the valid threads will do release semaphore for this
439 // thread, driver here only need to wait the valid thread count.
443 // First Notify ALL THREADS in current package that this thread is ready.
445 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
* CpuStatus
->MaxCoreCount
; ProcessorIndex
++) {
446 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
449 // Second, check whether VALID THREADS (not all threads) in current package are ready.
451 for (ProcessorIndex
= 0; ProcessorIndex
< ThreadCountPerPackage
[ApLocation
->Package
]; ProcessorIndex
++) {
452 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
469 Set Processor register for one AP.
471 @param PreSmmRegisterTable Use pre Smm register table or register table.
476 IN BOOLEAN PreSmmRegisterTable
479 CPU_REGISTER_TABLE
*RegisterTable
;
480 CPU_REGISTER_TABLE
*RegisterTables
;
485 if (PreSmmRegisterTable
) {
486 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
;
488 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
;
490 if (RegisterTables
== NULL
) {
494 InitApicId
= GetInitialApicId ();
495 RegisterTable
= NULL
;
496 ProcIndex
= (UINTN
)-1;
497 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
498 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
499 RegisterTable
= &RegisterTables
[Index
];
504 ASSERT (RegisterTable
!= NULL
);
506 if (mAcpiCpuData
.ApLocation
!= 0) {
507 ProgramProcessorRegister (
509 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)mAcpiCpuData
.ApLocation
+ ProcIndex
,
510 &mAcpiCpuData
.CpuStatus
,
514 ProgramProcessorRegister (
517 &mAcpiCpuData
.CpuStatus
,
524 AP initialization before then after SMBASE relocation in the S3 boot path.
534 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
539 // Count down the number with lock mechanism.
541 InterlockedDecrement (&mNumberToFinish
);
544 // Wait for BSP to signal SMM Base relocation done.
546 while (!mInitApsAfterSmmBaseReloc
) {
550 ProgramVirtualWireMode ();
551 DisableLvtInterrupts ();
556 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
558 TopOfStack
= (UINTN
) Stack
+ sizeof (Stack
);
559 TopOfStack
&= ~(UINTN
) (CPU_STACK_ALIGNMENT
- 1);
560 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
561 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
565 Prepares startup vector for APs.
567 This function prepares startup vector for APs.
569 @param WorkingBuffer The address of the work buffer.
572 PrepareApStartupVector (
573 EFI_PHYSICAL_ADDRESS WorkingBuffer
576 EFI_PHYSICAL_ADDRESS StartupVector
;
577 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
580 // Get the address map of startup code for AP,
581 // including code size, and offset of long jump instructions to redirect.
583 ZeroMem (&AddressMap
, sizeof (AddressMap
));
584 AsmGetAddressMap (&AddressMap
);
586 StartupVector
= WorkingBuffer
;
589 // Copy AP startup code to startup vector, and then redirect the long jump
590 // instructions for mode switching.
592 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
593 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
594 if (AddressMap
.LongJumpOffset
!= 0) {
595 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
599 // Get the start address of exchange data between BSP and AP.
601 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
602 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
604 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
605 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
607 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
608 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
609 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
610 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
611 mExchangeInfo
->InitializeFloatingPointUnitsAddress
= (UINTN
)InitializeFloatingPointUnits
;
615 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
617 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
618 and restores MTRRs for both BSP and APs.
622 InitializeCpuBeforeRebase (
626 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
630 ProgramVirtualWireMode ();
632 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
634 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
635 ASSERT (mNumberOfCpus
<= mAcpiCpuData
.NumberOfCpus
);
637 ASSERT (mNumberOfCpus
== mAcpiCpuData
.NumberOfCpus
);
639 mNumberToFinish
= (UINT32
)(mNumberOfCpus
- 1);
640 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) InitializeAp
;
643 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
645 mInitApsAfterSmmBaseReloc
= FALSE
;
648 // Send INIT IPI - SIPI to all APs
650 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
652 while (mNumberToFinish
> 0) {
658 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
660 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
661 data saved by normal boot path for both BSP and APs.
665 InitializeCpuAfterRebase (
669 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
670 ASSERT (mNumberOfCpus
<= mAcpiCpuData
.NumberOfCpus
);
672 ASSERT (mNumberOfCpus
== mAcpiCpuData
.NumberOfCpus
);
674 mNumberToFinish
= (UINT32
)(mNumberOfCpus
- 1);
677 // Signal that SMM base relocation is complete and to continue initialization for all APs.
679 mInitApsAfterSmmBaseReloc
= TRUE
;
682 // Must begin set register after all APs have continue their initialization.
683 // This is a requirement to support semaphore mechanism in register table.
684 // Because if semaphore's dependence type is package type, semaphore will wait
685 // for all Aps in one package finishing their tasks before set next register
686 // for all APs. If the Aps not begin its task during BSP doing its task, the
687 // BSP thread will hang because it is waiting for other Aps in the same
688 // package finishing their task.
692 while (mNumberToFinish
> 0) {
698 Restore SMM Configuration in S3 boot path.
702 RestoreSmmConfigurationInS3 (
706 if (!mAcpiS3Enable
) {
711 // Restore SMM Configuration in S3 boot path.
713 if (mRestoreSmmConfigurationInS3
) {
715 // Need make sure gSmst is correct because below function may use them.
717 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
718 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
719 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
720 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
721 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
724 // Configure SMM Code Access Check feature if available.
726 ConfigSmmCodeAccessCheck ();
728 SmmCpuFeaturesCompleteSmmReadyToLock ();
730 mRestoreSmmConfigurationInS3
= FALSE
;
735 Perform SMM initialization for all processors in the S3 boot path.
737 For a native platform, MP initialization in the S3 boot path is also performed in this function.
745 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
746 IA32_DESCRIPTOR Ia32Idtr
;
747 IA32_DESCRIPTOR X64Idtr
;
748 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
751 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
756 // See if there is enough context to resume PEI Phase
758 if (mSmmS3ResumeState
== NULL
) {
759 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
763 SmmS3ResumeState
= mSmmS3ResumeState
;
764 ASSERT (SmmS3ResumeState
!= NULL
);
766 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
768 // Save the IA32 IDT Descriptor
770 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
773 // Setup X64 IDT table
775 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
776 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
777 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
778 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
781 // Setup the default exception handler
783 Status
= InitializeCpuExceptionHandlers (NULL
);
784 ASSERT_EFI_ERROR (Status
);
787 // Initialize Debug Agent to support source level debug
789 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
793 // Skip initialization if mAcpiCpuData is not valid
795 if (mAcpiCpuData
.NumberOfCpus
> 0) {
797 // First time microcode load and restore MTRRs
799 InitializeCpuBeforeRebase ();
803 // Restore SMBASE for BSP and all APs
808 // Skip initialization if mAcpiCpuData is not valid
810 if (mAcpiCpuData
.NumberOfCpus
> 0) {
812 // Restore MSRs for BSP and all APs
814 InitializeCpuAfterRebase ();
818 // Set a flag to restore SMM configuration in S3 path.
820 mRestoreSmmConfigurationInS3
= TRUE
;
822 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
823 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
824 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
825 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
826 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
829 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
831 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
832 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
835 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
836 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
837 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
838 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
843 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
845 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
846 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
848 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
850 SaveAndSetDebugTimerInterrupt (FALSE
);
852 // Restore IA32 IDT table
854 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
856 SmmS3ResumeState
->ReturnCs
,
857 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
858 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
859 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
860 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
865 // Can not resume PEI Phase
867 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
872 Initialize SMM S3 resume state structure used during S3 Resume.
874 @param[in] Cr3 The base address of the page tables to use in SMM.
878 InitSmmS3ResumeState (
883 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
884 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
885 EFI_PHYSICAL_ADDRESS Address
;
888 if (!mAcpiS3Enable
) {
892 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
893 if (GuidHob
== NULL
) {
896 "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
898 &gEfiAcpiVariableGuid
902 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
904 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
905 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
907 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
908 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
910 mSmmS3ResumeState
= SmmS3ResumeState
;
911 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
913 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
915 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
916 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
917 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
918 SmmS3ResumeState
->SmmS3StackSize
= 0;
921 SmmS3ResumeState
->SmmS3Cr0
= mSmmCr0
;
922 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
923 SmmS3ResumeState
->SmmS3Cr4
= mSmmCr4
;
925 if (sizeof (UINTN
) == sizeof (UINT64
)) {
926 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
928 if (sizeof (UINTN
) == sizeof (UINT32
)) {
929 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
933 // Patch SmmS3ResumeState->SmmS3Cr3
939 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
940 // protected mode on S3 path
942 Address
= BASE_4GB
- 1;
943 Status
= gBS
->AllocatePages (
946 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
949 ASSERT_EFI_ERROR (Status
);
950 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
954 Copy register table from non-SMRAM into SMRAM.
956 @param[in] DestinationRegisterTableList Points to destination register table.
957 @param[in] SourceRegisterTableList Points to source register table.
958 @param[in] NumberOfCpus Number of CPUs.
963 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
964 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
965 IN UINT32 NumberOfCpus
969 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
971 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
972 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
973 if (DestinationRegisterTableList
[Index
].TableLength
!= 0) {
974 DestinationRegisterTableList
[Index
].AllocatedSize
= DestinationRegisterTableList
[Index
].TableLength
* sizeof (CPU_REGISTER_TABLE_ENTRY
);
975 RegisterTableEntry
= AllocateCopyPool (
976 DestinationRegisterTableList
[Index
].AllocatedSize
,
977 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
979 ASSERT (RegisterTableEntry
!= NULL
);
980 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
986 Check whether the register table is empty or not.
988 @param[in] RegisterTable Point to the register table.
989 @param[in] NumberOfCpus Number of CPUs.
991 @retval TRUE The register table is empty.
992 @retval FALSE The register table is not empty.
995 IsRegisterTableEmpty (
996 IN CPU_REGISTER_TABLE
*RegisterTable
,
997 IN UINT32 NumberOfCpus
1002 if (RegisterTable
!= NULL
) {
1003 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
1004 if (RegisterTable
[Index
].TableLength
!= 0) {
1022 ACPI_CPU_DATA
*AcpiCpuData
;
1023 IA32_DESCRIPTOR
*Gdtr
;
1024 IA32_DESCRIPTOR
*Idtr
;
1027 VOID
*MachineCheckHandlerForAp
;
1028 CPU_STATUS_INFORMATION
*CpuStatus
;
1030 if (!mAcpiS3Enable
) {
1035 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1037 mAcpiCpuData
.NumberOfCpus
= 0;
1040 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1042 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
1043 if (AcpiCpuData
== 0) {
1048 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1050 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
1052 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
1053 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
1055 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
1057 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
1058 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
1060 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
1062 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
1063 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
1065 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
1067 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
)) {
1068 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
1069 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
1072 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
1073 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
1074 mAcpiCpuData
.NumberOfCpus
1077 mAcpiCpuData
.PreSmmInitRegisterTable
= 0;
1080 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
, mAcpiCpuData
.NumberOfCpus
)) {
1081 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
1082 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
1085 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
1086 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
1087 mAcpiCpuData
.NumberOfCpus
1090 mAcpiCpuData
.RegisterTable
= 0;
1094 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1096 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
1097 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
1099 GdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
1100 ASSERT (GdtForAp
!= NULL
);
1101 IdtForAp
= (VOID
*) ((UINTN
)GdtForAp
+ (Gdtr
->Limit
+ 1));
1102 MachineCheckHandlerForAp
= (VOID
*) ((UINTN
)IdtForAp
+ (Idtr
->Limit
+ 1));
1104 CopyMem (GdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
1105 CopyMem (IdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
1106 CopyMem (MachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
1108 Gdtr
->Base
= (UINTN
)GdtForAp
;
1109 Idtr
->Base
= (UINTN
)IdtForAp
;
1110 mAcpiCpuData
.ApMachineCheckHandlerBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)MachineCheckHandlerForAp
;
1112 CpuStatus
= &mAcpiCpuData
.CpuStatus
;
1113 CopyMem (CpuStatus
, &AcpiCpuData
->CpuStatus
, sizeof (CPU_STATUS_INFORMATION
));
1114 if (AcpiCpuData
->CpuStatus
.ThreadCountPerPackage
!= 0) {
1115 CpuStatus
->ThreadCountPerPackage
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1116 sizeof (UINT32
) * CpuStatus
->PackageCount
,
1117 (UINT32
*)(UINTN
)AcpiCpuData
->CpuStatus
.ThreadCountPerPackage
1119 ASSERT (CpuStatus
->ThreadCountPerPackage
!= 0);
1121 if (AcpiCpuData
->CpuStatus
.ThreadCountPerCore
!= 0) {
1122 CpuStatus
->ThreadCountPerCore
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1123 sizeof (UINT8
) * (CpuStatus
->PackageCount
* CpuStatus
->MaxCoreCount
),
1124 (UINT32
*)(UINTN
)AcpiCpuData
->CpuStatus
.ThreadCountPerCore
1126 ASSERT (CpuStatus
->ThreadCountPerCore
!= 0);
1128 if (AcpiCpuData
->ApLocation
!= 0) {
1129 mAcpiCpuData
.ApLocation
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1130 mAcpiCpuData
.NumberOfCpus
* sizeof (EFI_CPU_PHYSICAL_LOCATION
),
1131 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)AcpiCpuData
->ApLocation
1133 ASSERT (mAcpiCpuData
.ApLocation
!= 0);
1135 if (CpuStatus
->PackageCount
!= 0) {
1136 mCpuFlags
.CoreSemaphoreCount
= AllocateZeroPool (
1137 sizeof (UINT32
) * CpuStatus
->PackageCount
*
1138 CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
1140 ASSERT (mCpuFlags
.CoreSemaphoreCount
!= NULL
);
1141 mCpuFlags
.PackageSemaphoreCount
= AllocateZeroPool (
1142 sizeof (UINT32
) * CpuStatus
->PackageCount
*
1143 CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
1145 ASSERT (mCpuFlags
.PackageSemaphoreCount
!= NULL
);
1147 InitializeSpinLock((SPIN_LOCK
*) &mCpuFlags
.MemoryMappedLock
);
1151 Get ACPI S3 enable flag.
1155 GetAcpiS3EnableFlag (
1159 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);