2 Code for Processor S3 restoration
4 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 #include "PiSmmCpuDxeSmm.h"
23 IA32_DESCRIPTOR GdtrProfile
;
24 IA32_DESCRIPTOR IdtrProfile
;
27 UINTN InitializeFloatingPointUnitsAddress
;
28 } MP_CPU_EXCHANGE_INFO
;
32 UINT8
*RendezvousFunnelAddress
;
33 UINTN PModeEntryOffset
;
36 UINTN LModeEntryOffset
;
38 } MP_ASSEMBLY_ADDRESS_MAP
;
41 // Flags used when program the register.
44 volatile UINTN ConsoleLogLock
; // Spinlock used to control console.
45 volatile UINTN MemoryMappedLock
; // Spinlock used to program mmio
46 volatile UINT32
*SemaphoreCount
; // Semaphore used to program semaphore.
47 } PROGRAM_CPU_REGISTER_FLAGS
;
//
// Signal that SMM BASE relocation is complete.
// Cleared by the BSP before SMBASE relocation (InitializeCpuBeforeRebase)
// and set TRUE afterwards (InitializeCpuAfterRebase); APs spin on it in
// their init routine ("while (!mInitApsAfterSmmBaseReloc)"), hence volatile
// so the compiler re-reads it on every iteration.
//
volatile BOOLEAN mInitApsAfterSmmBaseReloc;
55 Get starting address and size of the rendezvous entry for APs.
56 Information for fixing a jump instruction in the code is also returned.
58 @param AddressMap Output buffer for address map information.
63 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
//
// Legacy (below-1MB) memory region: two 4 KiB pages ending at 0xA0000
// (immediately below the legacy VGA window).
// NOTE(review): presumably reserved for the real-mode AP startup vector on
// S3 resume — the consumer of these macros is not visible in this chunk;
// confirm against the caller of PrepareApStartupVector().
//
#define LEGACY_REGION_SIZE (2 * 0x1000)
#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;                            // Spinlocks and semaphore array used while programming CPU registers.
ACPI_CPU_DATA              mAcpiCpuData;                         // SMRAM copy of the CPU S3 data published via PcdCpuS3DataAddress.
volatile UINT32            mNumberToFinish;                      // Number of APs with outstanding work; APs decrement it, BSP polls until 0.
MP_CPU_EXCHANGE_INFO       *mExchangeInfo;                       // BSP<->AP exchange area placed right after the AP startup code.
BOOLEAN                    mRestoreSmmConfigurationInS3 = FALSE; // Set TRUE on S3 resume so RestoreSmmConfigurationInS3() runs exactly once.
BOOLEAN                    mSmmS3Flag = FALSE;                   // NOTE(review): not referenced in this chunk — purpose not confirmable here.
//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE        *mSmmS3ResumeState = NULL;
BOOLEAN                    mAcpiS3Enable = TRUE;                 // Cached PcdAcpiS3Enable; gates all S3 code paths in this file.
UINT8                      *mApHltLoopCode = NULL;               // ACPI NVS buffer receiving mApHltLoopCodeTemplate for the AP hlt loop.
88 UINT8 mApHltLoopCodeTemplate
[] = {
89 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
90 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
//
// Printable names for REGISTER_TYPE values, used in the DEBUG trace inside
// ProgramProcessorRegister(); indexed with MIN (Type, InvalidReg) so any
// out-of-range register type prints as "INVALID" instead of reading past
// the end of the array.
//
CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };
99 Sync up the MTRR values for all processors.
101 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
106 EFI_PHYSICAL_ADDRESS MtrrTable
112 Sync up the MTRR values for all processors.
121 MTRR_SETTINGS
*MtrrSettings
;
123 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
124 MtrrSetAllMtrrs (MtrrSettings
);
128 Increment semaphore by 1.
130 @param Sem IN: 32-bit unsigned integer
135 IN OUT
volatile UINT32
*Sem
138 InterlockedIncrement (Sem
);
142 Decrement the semaphore by 1 if it is not zero.
144 Performs an atomic decrement operation for semaphore.
145 The compare exchange operation must be performed using
148 @param Sem IN: 32-bit unsigned integer
153 IN OUT
volatile UINT32
*Sem
160 } while (Value
== 0 ||
161 InterlockedCompareExchange32 (
169 Initialize the CPU registers from a register table.
171 @param[in] RegisterTable The register table for this AP.
172 @param[in] ApLocation AP location info for this ap.
173 @param[in] CpuStatus CPU status info for this CPU.
174 @param[in] CpuFlags Flags data structure used when program the register.
176 @note This service could be called by BSP/APs.
179 ProgramProcessorRegister (
180 IN CPU_REGISTER_TABLE
*RegisterTable
,
181 IN EFI_CPU_PHYSICAL_LOCATION
*ApLocation
,
182 IN CPU_STATUS_INFORMATION
*CpuStatus
,
183 IN PROGRAM_CPU_REGISTER_FLAGS
*CpuFlags
186 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
189 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntryHead
;
190 volatile UINT32
*SemaphorePtr
;
192 UINT32 PackageThreadsCount
;
193 UINT32 CurrentThread
;
194 UINTN ProcessorIndex
;
196 UINTN ValidThreadCount
;
197 UINT32
*ValidCoreCountPerPackage
;
200 // Traverse Register Table of this logical processor
202 RegisterTableEntryHead
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
204 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++) {
206 RegisterTableEntry
= &RegisterTableEntryHead
[Index
];
209 if (ApLocation
!= NULL
) {
210 AcquireSpinLock (&CpuFlags
->ConsoleLogLock
);
211 ThreadIndex
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
+
212 ApLocation
->Core
* CpuStatus
->MaxThreadCount
+
216 "Processor = %lu, Entry Index %lu, Type = %s!\n",
219 mRegisterTypeStr
[MIN ((REGISTER_TYPE
)RegisterTableEntry
->RegisterType
, InvalidReg
)]
221 ReleaseSpinLock (&CpuFlags
->ConsoleLogLock
);
226 // Check the type of specified register
228 switch (RegisterTableEntry
->RegisterType
) {
230 // The specified register is Control Register
232 case ControlRegister
:
233 switch (RegisterTableEntry
->Index
) {
235 Value
= AsmReadCr0 ();
236 Value
= (UINTN
) BitFieldWrite64 (
238 RegisterTableEntry
->ValidBitStart
,
239 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
240 (UINTN
) RegisterTableEntry
->Value
245 Value
= AsmReadCr2 ();
246 Value
= (UINTN
) BitFieldWrite64 (
248 RegisterTableEntry
->ValidBitStart
,
249 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
250 (UINTN
) RegisterTableEntry
->Value
255 Value
= AsmReadCr3 ();
256 Value
= (UINTN
) BitFieldWrite64 (
258 RegisterTableEntry
->ValidBitStart
,
259 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
260 (UINTN
) RegisterTableEntry
->Value
265 Value
= AsmReadCr4 ();
266 Value
= (UINTN
) BitFieldWrite64 (
268 RegisterTableEntry
->ValidBitStart
,
269 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
270 (UINTN
) RegisterTableEntry
->Value
279 // The specified register is Model Specific Register
283 // If this function is called to restore register setting after INIT signal,
284 // there is no need to restore MSRs in register table.
286 if (RegisterTableEntry
->ValidBitLength
>= 64) {
288 // If length is not less than 64 bits, then directly write without reading
291 RegisterTableEntry
->Index
,
292 RegisterTableEntry
->Value
296 // Set the bit section according to bit start and length
298 AsmMsrBitFieldWrite64 (
299 RegisterTableEntry
->Index
,
300 RegisterTableEntry
->ValidBitStart
,
301 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
302 RegisterTableEntry
->Value
307 // MemoryMapped operations
310 AcquireSpinLock (&CpuFlags
->MemoryMappedLock
);
311 MmioBitFieldWrite32 (
312 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
313 RegisterTableEntry
->ValidBitStart
,
314 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
315 (UINT32
)RegisterTableEntry
->Value
317 ReleaseSpinLock (&CpuFlags
->MemoryMappedLock
);
320 // Enable or disable cache
324 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
326 if (RegisterTableEntry
->Value
== 0) {
334 // Semaphore works logic like below:
336 // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
337 // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
339 // All threads (T0...Tn) waits in P() line and continues running
345 // V(0...n) V(0...n) ... V(0...n)
346 // n * P(0) n * P(1) ... n * P(n)
349 (ApLocation
!= NULL
) &&
350 (CpuStatus
->ValidCoreCountPerPackage
!= 0) &&
351 (CpuFlags
->SemaphoreCount
) != NULL
353 SemaphorePtr
= CpuFlags
->SemaphoreCount
;
354 switch (RegisterTableEntry
->Value
) {
357 // Get Offset info for the first thread in the core which current thread belongs to.
359 FirstThread
= (ApLocation
->Package
* CpuStatus
->MaxCoreCount
+ ApLocation
->Core
) * CpuStatus
->MaxThreadCount
;
360 CurrentThread
= FirstThread
+ ApLocation
->Thread
;
362 // First Notify all threads in current Core that this thread has ready.
364 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
; ProcessorIndex
++) {
365 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
368 // Second, check whether all valid threads in current core have ready.
370 for (ProcessorIndex
= 0; ProcessorIndex
< CpuStatus
->MaxThreadCount
; ProcessorIndex
++) {
371 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
376 ValidCoreCountPerPackage
= (UINT32
*)(UINTN
)CpuStatus
->ValidCoreCountPerPackage
;
378 // Get Offset info for the first thread in the package which current thread belongs to.
380 FirstThread
= ApLocation
->Package
* CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
;
382 // Get the possible threads count for current package.
384 PackageThreadsCount
= CpuStatus
->MaxThreadCount
* CpuStatus
->MaxCoreCount
;
385 CurrentThread
= FirstThread
+ CpuStatus
->MaxThreadCount
* ApLocation
->Core
+ ApLocation
->Thread
;
387 // Get the valid thread count for current package.
389 ValidThreadCount
= CpuStatus
->MaxThreadCount
* ValidCoreCountPerPackage
[ApLocation
->Package
];
392 // Different packages may have different valid cores in them. If driver maintail clearly
393 // cores number in different packages, the logic will be much complicated.
394 // Here driver just simply records the max core number in all packages and use it as expect
395 // core number for all packages.
396 // In below two steps logic, first current thread will Release semaphore for each thread
397 // in current package. Maybe some threads are not valid in this package, but driver don't
398 // care. Second, driver will let current thread wait semaphore for all valid threads in
399 // current package. Because only the valid threads will do release semaphore for this
400 // thread, driver here only need to wait the valid thread count.
404 // First Notify all threads in current package that this thread has ready.
406 for (ProcessorIndex
= 0; ProcessorIndex
< PackageThreadsCount
; ProcessorIndex
++) {
407 S3ReleaseSemaphore (&SemaphorePtr
[FirstThread
+ ProcessorIndex
]);
410 // Second, check whether all valid threads in current package have ready.
412 for (ProcessorIndex
= 0; ProcessorIndex
< ValidThreadCount
; ProcessorIndex
++) {
413 S3WaitForSemaphore (&SemaphorePtr
[CurrentThread
]);
430 Set Processor register for one AP.
432 @param PreSmmRegisterTable Use pre Smm register table or register table.
437 IN BOOLEAN PreSmmRegisterTable
440 CPU_REGISTER_TABLE
*RegisterTable
;
441 CPU_REGISTER_TABLE
*RegisterTables
;
446 if (PreSmmRegisterTable
) {
447 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
;
449 RegisterTables
= (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
;
452 InitApicId
= GetInitialApicId ();
453 RegisterTable
= NULL
;
454 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
455 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
456 RegisterTable
= &RegisterTables
[Index
];
461 ASSERT (RegisterTable
!= NULL
);
463 if (mAcpiCpuData
.ApLocation
!= 0) {
464 ProgramProcessorRegister (
466 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)mAcpiCpuData
.ApLocation
+ ProcIndex
,
467 &mAcpiCpuData
.CpuStatus
,
471 ProgramProcessorRegister (
474 &mAcpiCpuData
.CpuStatus
,
481 AP initialization before then after SMBASE relocation in the S3 boot path.
491 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
496 // Count down the number with lock mechanism.
498 InterlockedDecrement (&mNumberToFinish
);
501 // Wait for BSP to signal SMM Base relocation done.
503 while (!mInitApsAfterSmmBaseReloc
) {
507 ProgramVirtualWireMode ();
508 DisableLvtInterrupts ();
513 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
515 TopOfStack
= (UINTN
) Stack
+ sizeof (Stack
);
516 TopOfStack
&= ~(UINTN
) (CPU_STACK_ALIGNMENT
- 1);
517 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
518 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
522 Prepares startup vector for APs.
524 This function prepares startup vector for APs.
526 @param WorkingBuffer The address of the work buffer.
529 PrepareApStartupVector (
530 EFI_PHYSICAL_ADDRESS WorkingBuffer
533 EFI_PHYSICAL_ADDRESS StartupVector
;
534 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
537 // Get the address map of startup code for AP,
538 // including code size, and offset of long jump instructions to redirect.
540 ZeroMem (&AddressMap
, sizeof (AddressMap
));
541 AsmGetAddressMap (&AddressMap
);
543 StartupVector
= WorkingBuffer
;
546 // Copy AP startup code to startup vector, and then redirect the long jump
547 // instructions for mode switching.
549 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
550 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
551 if (AddressMap
.LongJumpOffset
!= 0) {
552 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
556 // Get the start address of exchange data between BSP and AP.
558 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
559 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
561 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
562 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
564 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
565 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
566 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
567 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
568 mExchangeInfo
->InitializeFloatingPointUnitsAddress
= (UINTN
)InitializeFloatingPointUnits
;
572 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
574 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
575 and restores MTRRs for both BSP and APs.
579 InitializeCpuBeforeRebase (
583 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
587 ProgramVirtualWireMode ();
589 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
591 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
592 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) InitializeAp
;
595 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
597 mInitApsAfterSmmBaseReloc
= FALSE
;
600 // Send INIT IPI - SIPI to all APs
602 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
604 while (mNumberToFinish
> 0) {
610 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
612 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
613 data saved by normal boot path for both BSP and APs.
617 InitializeCpuAfterRebase (
621 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
624 // Signal that SMM base relocation is complete and to continue initialization for all APs.
626 mInitApsAfterSmmBaseReloc
= TRUE
;
629 // Must begin set register after all APs have continue their initialization.
630 // This is a requirement to support semaphore mechanism in register table.
631 // Because if semaphore's dependence type is package type, semaphore will wait
632 // for all Aps in one package finishing their tasks before set next register
633 // for all APs. If the Aps not begin its task during BSP doing its task, the
634 // BSP thread will hang because it is waiting for other Aps in the same
635 // package finishing their task.
639 while (mNumberToFinish
> 0) {
645 Restore SMM Configuration in S3 boot path.
649 RestoreSmmConfigurationInS3 (
653 if (!mAcpiS3Enable
) {
658 // Restore SMM Configuration in S3 boot path.
660 if (mRestoreSmmConfigurationInS3
) {
662 // Need make sure gSmst is correct because below function may use them.
664 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
665 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
666 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
667 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
668 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
671 // Configure SMM Code Access Check feature if available.
673 ConfigSmmCodeAccessCheck ();
675 SmmCpuFeaturesCompleteSmmReadyToLock ();
677 mRestoreSmmConfigurationInS3
= FALSE
;
682 Perform SMM initialization for all processors in the S3 boot path.
684 For a native platform, MP initialization in the S3 boot path is also performed in this function.
692 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
693 IA32_DESCRIPTOR Ia32Idtr
;
694 IA32_DESCRIPTOR X64Idtr
;
695 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
698 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
703 // See if there is enough context to resume PEI Phase
705 if (mSmmS3ResumeState
== NULL
) {
706 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
710 SmmS3ResumeState
= mSmmS3ResumeState
;
711 ASSERT (SmmS3ResumeState
!= NULL
);
713 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
715 // Save the IA32 IDT Descriptor
717 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
720 // Setup X64 IDT table
722 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
723 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
724 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
725 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
728 // Setup the default exception handler
730 Status
= InitializeCpuExceptionHandlers (NULL
);
731 ASSERT_EFI_ERROR (Status
);
734 // Initialize Debug Agent to support source level debug
736 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
740 // Skip initialization if mAcpiCpuData is not valid
742 if (mAcpiCpuData
.NumberOfCpus
> 0) {
744 // First time microcode load and restore MTRRs
746 InitializeCpuBeforeRebase ();
750 // Restore SMBASE for BSP and all APs
755 // Skip initialization if mAcpiCpuData is not valid
757 if (mAcpiCpuData
.NumberOfCpus
> 0) {
759 // Restore MSRs for BSP and all APs
761 InitializeCpuAfterRebase ();
765 // Set a flag to restore SMM configuration in S3 path.
767 mRestoreSmmConfigurationInS3
= TRUE
;
769 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
770 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
771 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
772 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
773 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
776 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
778 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
779 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
782 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
783 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
784 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
785 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
790 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
792 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
793 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
795 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
797 SaveAndSetDebugTimerInterrupt (FALSE
);
799 // Restore IA32 IDT table
801 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
803 SmmS3ResumeState
->ReturnCs
,
804 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
805 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
806 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
807 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
812 // Can not resume PEI Phase
814 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
819 Initialize SMM S3 resume state structure used during S3 Resume.
821 @param[in] Cr3 The base address of the page tables to use in SMM.
825 InitSmmS3ResumeState (
830 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
831 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
832 EFI_PHYSICAL_ADDRESS Address
;
835 if (!mAcpiS3Enable
) {
839 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
840 if (GuidHob
== NULL
) {
843 "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
845 &gEfiAcpiVariableGuid
849 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
851 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
852 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
854 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
855 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
857 mSmmS3ResumeState
= SmmS3ResumeState
;
858 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
860 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
862 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
863 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
864 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
865 SmmS3ResumeState
->SmmS3StackSize
= 0;
868 SmmS3ResumeState
->SmmS3Cr0
= mSmmCr0
;
869 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
870 SmmS3ResumeState
->SmmS3Cr4
= mSmmCr4
;
872 if (sizeof (UINTN
) == sizeof (UINT64
)) {
873 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
875 if (sizeof (UINTN
) == sizeof (UINT32
)) {
876 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
880 // Patch SmmS3ResumeState->SmmS3Cr3
886 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
887 // protected mode on S3 path
889 Address
= BASE_4GB
- 1;
890 Status
= gBS
->AllocatePages (
893 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
896 ASSERT_EFI_ERROR (Status
);
897 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
901 Copy register table from ACPI NVS memory into SMRAM.
903 @param[in] DestinationRegisterTableList Points to destination register table.
904 @param[in] SourceRegisterTableList Points to source register table.
905 @param[in] NumberOfCpus Number of CPUs.
910 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
911 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
912 IN UINT32 NumberOfCpus
916 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
918 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
919 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
920 if (DestinationRegisterTableList
[Index
].AllocatedSize
!= 0) {
921 RegisterTableEntry
= AllocateCopyPool (
922 DestinationRegisterTableList
[Index
].AllocatedSize
,
923 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
925 ASSERT (RegisterTableEntry
!= NULL
);
926 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
940 ACPI_CPU_DATA
*AcpiCpuData
;
941 IA32_DESCRIPTOR
*Gdtr
;
942 IA32_DESCRIPTOR
*Idtr
;
945 VOID
*MachineCheckHandlerForAp
;
946 CPU_STATUS_INFORMATION
*CpuStatus
;
948 if (!mAcpiS3Enable
) {
953 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
955 mAcpiCpuData
.NumberOfCpus
= 0;
958 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
960 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
961 if (AcpiCpuData
== 0) {
966 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
968 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
970 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
971 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
973 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
975 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
976 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
978 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
980 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
981 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
983 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
985 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
986 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
989 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
990 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
991 mAcpiCpuData
.NumberOfCpus
994 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
995 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
998 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
999 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
1000 mAcpiCpuData
.NumberOfCpus
1004 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1006 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
1007 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
1009 GdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
1010 ASSERT (GdtForAp
!= NULL
);
1011 IdtForAp
= (VOID
*) ((UINTN
)GdtForAp
+ (Gdtr
->Limit
+ 1));
1012 MachineCheckHandlerForAp
= (VOID
*) ((UINTN
)IdtForAp
+ (Idtr
->Limit
+ 1));
1014 CopyMem (GdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
1015 CopyMem (IdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
1016 CopyMem (MachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
1018 Gdtr
->Base
= (UINTN
)GdtForAp
;
1019 Idtr
->Base
= (UINTN
)IdtForAp
;
1020 mAcpiCpuData
.ApMachineCheckHandlerBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)MachineCheckHandlerForAp
;
1022 CpuStatus
= &mAcpiCpuData
.CpuStatus
;
1023 CopyMem (CpuStatus
, &AcpiCpuData
->CpuStatus
, sizeof (CPU_STATUS_INFORMATION
));
1024 if (AcpiCpuData
->CpuStatus
.ValidCoreCountPerPackage
!= 0) {
1025 CpuStatus
->ValidCoreCountPerPackage
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1026 sizeof (UINT32
) * CpuStatus
->PackageCount
,
1027 (UINT32
*)(UINTN
)AcpiCpuData
->CpuStatus
.ValidCoreCountPerPackage
1029 ASSERT (CpuStatus
->ValidCoreCountPerPackage
!= 0);
1031 if (AcpiCpuData
->ApLocation
!= 0) {
1032 mAcpiCpuData
.ApLocation
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocateCopyPool (
1033 mAcpiCpuData
.NumberOfCpus
* sizeof (EFI_CPU_PHYSICAL_LOCATION
),
1034 (EFI_CPU_PHYSICAL_LOCATION
*)(UINTN
)AcpiCpuData
->ApLocation
1036 ASSERT (mAcpiCpuData
.ApLocation
!= 0);
1038 if (CpuStatus
->PackageCount
!= 0) {
1039 mCpuFlags
.SemaphoreCount
= AllocateZeroPool (
1040 sizeof (UINT32
) * CpuStatus
->PackageCount
*
1041 CpuStatus
->MaxCoreCount
* CpuStatus
->MaxThreadCount
);
1042 ASSERT (mCpuFlags
.SemaphoreCount
!= NULL
);
1044 InitializeSpinLock((SPIN_LOCK
*) &mCpuFlags
.MemoryMappedLock
);
1045 InitializeSpinLock((SPIN_LOCK
*) &mCpuFlags
.ConsoleLogLock
);
1049 Get ACPI S3 enable flag.
1053 GetAcpiS3EnableFlag (
1057 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);