/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
23 IA32_DESCRIPTOR GdtrProfile
;
24 IA32_DESCRIPTOR IdtrProfile
;
27 UINTN InitializeFloatingPointUnitsAddress
;
28 } MP_CPU_EXCHANGE_INFO
;
32 UINT8
*RendezvousFunnelAddress
;
33 UINTN PModeEntryOffset
;
36 UINTN LModeEntryOffset
;
38 } MP_ASSEMBLY_ADDRESS_MAP
;
41 // Spin lock used to serialize MemoryMapped operation
43 SPIN_LOCK
*mMemoryMappedLock
= NULL
;
46 // Signal that SMM BASE relocation is complete.
48 volatile BOOLEAN mInitApsAfterSmmBaseReloc
;
51 Get starting address and size of the rendezvous entry for APs.
52 Information for fixing a jump instruction in the code is also returned.
54 @param AddressMap Output buffer for address map information.
59 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
62 #define LEGACY_REGION_SIZE (2 * 0x1000)
63 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
65 ACPI_CPU_DATA mAcpiCpuData
;
66 volatile UINT32 mNumberToFinish
;
67 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
68 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
69 MP_MSR_LOCK
*mMsrSpinLocks
= NULL
;
70 UINTN mMsrSpinLockCount
;
76 BOOLEAN mSmmS3Flag
= FALSE
;
79 // Pointer to structure used during S3 Resume
81 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
83 BOOLEAN mAcpiS3Enable
= TRUE
;
85 UINT8
*mApHltLoopCode
= NULL
;
86 UINT8 mApHltLoopCodeTemplate
[] = {
87 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
88 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
95 Get MSR spin lock by MSR index.
97 @param MsrIndex MSR index value.
99 @return Pointer to MSR spin lock.
103 GetMsrSpinLockByIndex (
108 for (Index
= 0; Index
< mMsrCount
; Index
++) {
109 if (MsrIndex
== mMsrSpinLocks
[Index
].MsrIndex
) {
110 return mMsrSpinLocks
[Index
].SpinLock
;
117 Initialize MSR spin lock by MSR index.
119 @param MsrIndex MSR index value.
123 InitMsrSpinLockByIndex (
127 UINTN MsrSpinLockCount
;
128 UINTN NewMsrSpinLockCount
;
132 if (mMsrSpinLocks
== NULL
) {
133 MsrSpinLockCount
= mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
;
134 mMsrSpinLocks
= (MP_MSR_LOCK
*) AllocatePool (sizeof (MP_MSR_LOCK
) * MsrSpinLockCount
);
135 ASSERT (mMsrSpinLocks
!= NULL
);
136 for (Index
= 0; Index
< MsrSpinLockCount
; Index
++) {
137 mMsrSpinLocks
[Index
].SpinLock
=
138 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+ Index
* mSemaphoreSize
);
139 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
141 mMsrSpinLockCount
= MsrSpinLockCount
;
142 mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
= 0;
144 if (GetMsrSpinLockByIndex (MsrIndex
) == NULL
) {
146 // Initialize spin lock for MSR programming
148 mMsrSpinLocks
[mMsrCount
].MsrIndex
= MsrIndex
;
149 InitializeSpinLock (mMsrSpinLocks
[mMsrCount
].SpinLock
);
151 if (mMsrCount
== mMsrSpinLockCount
) {
153 // If MSR spin lock buffer is full, enlarge it
155 AddedSize
= SIZE_4KB
;
156 mSmmCpuSemaphores
.SemaphoreMsr
.Msr
=
157 AllocatePages (EFI_SIZE_TO_PAGES(AddedSize
));
158 ASSERT (mSmmCpuSemaphores
.SemaphoreMsr
.Msr
!= NULL
);
159 NewMsrSpinLockCount
= mMsrSpinLockCount
+ AddedSize
/ mSemaphoreSize
;
160 mMsrSpinLocks
= ReallocatePool (
161 sizeof (MP_MSR_LOCK
) * mMsrSpinLockCount
,
162 sizeof (MP_MSR_LOCK
) * NewMsrSpinLockCount
,
165 ASSERT (mMsrSpinLocks
!= NULL
);
166 mMsrSpinLockCount
= NewMsrSpinLockCount
;
167 for (Index
= mMsrCount
; Index
< mMsrSpinLockCount
; Index
++) {
168 mMsrSpinLocks
[Index
].SpinLock
=
169 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+
170 (Index
- mMsrCount
) * mSemaphoreSize
);
171 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
178 Sync up the MTRR values for all processors.
180 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
185 EFI_PHYSICAL_ADDRESS MtrrTable
191 Sync up the MTRR values for all processors.
200 MTRR_SETTINGS
*MtrrSettings
;
202 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
203 MtrrSetAllMtrrs (MtrrSettings
);
207 Programs registers for the calling processor.
209 This function programs registers for the calling processor.
211 @param RegisterTables Pointer to register table of the running processor.
212 @param RegisterTableCount Register table count.
216 SetProcessorRegister (
217 IN CPU_REGISTER_TABLE
*RegisterTables
,
218 IN UINTN RegisterTableCount
221 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
224 SPIN_LOCK
*MsrSpinLock
;
226 CPU_REGISTER_TABLE
*RegisterTable
;
228 InitApicId
= GetInitialApicId ();
229 RegisterTable
= NULL
;
230 for (Index
= 0; Index
< RegisterTableCount
; Index
++) {
231 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
232 RegisterTable
= &RegisterTables
[Index
];
236 ASSERT (RegisterTable
!= NULL
);
239 // Traverse Register Table of this logical processor
241 RegisterTableEntry
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
242 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++, RegisterTableEntry
++) {
244 // Check the type of specified register
246 switch (RegisterTableEntry
->RegisterType
) {
248 // The specified register is Control Register
250 case ControlRegister
:
251 switch (RegisterTableEntry
->Index
) {
253 Value
= AsmReadCr0 ();
254 Value
= (UINTN
) BitFieldWrite64 (
256 RegisterTableEntry
->ValidBitStart
,
257 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
258 (UINTN
) RegisterTableEntry
->Value
263 Value
= AsmReadCr2 ();
264 Value
= (UINTN
) BitFieldWrite64 (
266 RegisterTableEntry
->ValidBitStart
,
267 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
268 (UINTN
) RegisterTableEntry
->Value
273 Value
= AsmReadCr3 ();
274 Value
= (UINTN
) BitFieldWrite64 (
276 RegisterTableEntry
->ValidBitStart
,
277 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
278 (UINTN
) RegisterTableEntry
->Value
283 Value
= AsmReadCr4 ();
284 Value
= (UINTN
) BitFieldWrite64 (
286 RegisterTableEntry
->ValidBitStart
,
287 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
288 (UINTN
) RegisterTableEntry
->Value
297 // The specified register is Model Specific Register
301 // If this function is called to restore register setting after INIT signal,
302 // there is no need to restore MSRs in register table.
304 if (RegisterTableEntry
->ValidBitLength
>= 64) {
306 // If length is not less than 64 bits, then directly write without reading
309 RegisterTableEntry
->Index
,
310 RegisterTableEntry
->Value
314 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
315 // to make sure MSR read/write operation is atomic.
317 MsrSpinLock
= GetMsrSpinLockByIndex (RegisterTableEntry
->Index
);
318 AcquireSpinLock (MsrSpinLock
);
320 // Set the bit section according to bit start and length
322 AsmMsrBitFieldWrite64 (
323 RegisterTableEntry
->Index
,
324 RegisterTableEntry
->ValidBitStart
,
325 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
326 RegisterTableEntry
->Value
328 ReleaseSpinLock (MsrSpinLock
);
332 // MemoryMapped operations
335 AcquireSpinLock (mMemoryMappedLock
);
336 MmioBitFieldWrite32 (
337 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
338 RegisterTableEntry
->ValidBitStart
,
339 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
340 (UINT32
)RegisterTableEntry
->Value
342 ReleaseSpinLock (mMemoryMappedLock
);
345 // Enable or disable cache
349 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
351 if (RegisterTableEntry
->Value
== 0) {
365 AP initialization before then after SMBASE relocation in the S3 boot path.
375 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
377 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
380 // Count down the number with lock mechanism.
382 InterlockedDecrement (&mNumberToFinish
);
385 // Wait for BSP to signal SMM Base relocation done.
387 while (!mInitApsAfterSmmBaseReloc
) {
391 ProgramVirtualWireMode ();
392 DisableLvtInterrupts ();
394 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
397 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
399 TopOfStack
= (UINTN
) Stack
+ sizeof (Stack
);
400 TopOfStack
&= ~(UINTN
) (CPU_STACK_ALIGNMENT
- 1);
401 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
402 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
406 Prepares startup vector for APs.
408 This function prepares startup vector for APs.
410 @param WorkingBuffer The address of the work buffer.
413 PrepareApStartupVector (
414 EFI_PHYSICAL_ADDRESS WorkingBuffer
417 EFI_PHYSICAL_ADDRESS StartupVector
;
418 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
421 // Get the address map of startup code for AP,
422 // including code size, and offset of long jump instructions to redirect.
424 ZeroMem (&AddressMap
, sizeof (AddressMap
));
425 AsmGetAddressMap (&AddressMap
);
427 StartupVector
= WorkingBuffer
;
430 // Copy AP startup code to startup vector, and then redirect the long jump
431 // instructions for mode switching.
433 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
434 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
435 if (AddressMap
.LongJumpOffset
!= 0) {
436 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
440 // Get the start address of exchange data between BSP and AP.
442 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
443 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
445 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
446 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
448 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
449 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
450 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
451 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
452 mExchangeInfo
->InitializeFloatingPointUnitsAddress
= (UINTN
)InitializeFloatingPointUnits
;
456 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
458 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
459 and restores MTRRs for both BSP and APs.
463 InitializeCpuBeforeRebase (
467 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
469 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
471 ProgramVirtualWireMode ();
473 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
475 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
476 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) InitializeAp
;
479 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
481 mInitApsAfterSmmBaseReloc
= FALSE
;
484 // Send INIT IPI - SIPI to all APs
486 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
488 while (mNumberToFinish
> 0) {
494 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
496 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
497 data saved by normal boot path for both BSP and APs.
501 InitializeCpuAfterRebase (
505 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
507 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
510 // Signal that SMM base relocation is complete and to continue initialization.
512 mInitApsAfterSmmBaseReloc
= TRUE
;
514 while (mNumberToFinish
> 0) {
520 Restore SMM Configuration in S3 boot path.
524 RestoreSmmConfigurationInS3 (
528 if (!mAcpiS3Enable
) {
533 // Restore SMM Configuration in S3 boot path.
535 if (mRestoreSmmConfigurationInS3
) {
537 // Need make sure gSmst is correct because below function may use them.
539 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
540 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
541 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
542 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
543 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
546 // Configure SMM Code Access Check feature if available.
548 ConfigSmmCodeAccessCheck ();
550 SmmCpuFeaturesCompleteSmmReadyToLock ();
552 mRestoreSmmConfigurationInS3
= FALSE
;
557 Perform SMM initialization for all processors in the S3 boot path.
559 For a native platform, MP initialization in the S3 boot path is also performed in this function.
567 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
568 IA32_DESCRIPTOR Ia32Idtr
;
569 IA32_DESCRIPTOR X64Idtr
;
570 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
573 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
577 InitializeSpinLock (mMemoryMappedLock
);
580 // See if there is enough context to resume PEI Phase
582 if (mSmmS3ResumeState
== NULL
) {
583 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
587 SmmS3ResumeState
= mSmmS3ResumeState
;
588 ASSERT (SmmS3ResumeState
!= NULL
);
590 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
592 // Save the IA32 IDT Descriptor
594 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
597 // Setup X64 IDT table
599 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
600 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
601 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
602 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
605 // Setup the default exception handler
607 Status
= InitializeCpuExceptionHandlers (NULL
);
608 ASSERT_EFI_ERROR (Status
);
611 // Initialize Debug Agent to support source level debug
613 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
617 // Skip initialization if mAcpiCpuData is not valid
619 if (mAcpiCpuData
.NumberOfCpus
> 0) {
621 // First time microcode load and restore MTRRs
623 InitializeCpuBeforeRebase ();
627 // Restore SMBASE for BSP and all APs
632 // Skip initialization if mAcpiCpuData is not valid
634 if (mAcpiCpuData
.NumberOfCpus
> 0) {
636 // Restore MSRs for BSP and all APs
638 InitializeCpuAfterRebase ();
642 // Set a flag to restore SMM configuration in S3 path.
644 mRestoreSmmConfigurationInS3
= TRUE
;
646 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
647 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
648 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
649 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
650 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
653 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
655 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
656 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
659 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
660 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
661 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
662 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
667 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
669 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
670 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
672 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
674 SaveAndSetDebugTimerInterrupt (FALSE
);
676 // Restore IA32 IDT table
678 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
680 SmmS3ResumeState
->ReturnCs
,
681 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
682 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
683 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
684 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
689 // Can not resume PEI Phase
691 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
696 Initialize SMM S3 resume state structure used during S3 Resume.
698 @param[in] Cr3 The base address of the page tables to use in SMM.
702 InitSmmS3ResumeState (
707 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
708 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
709 EFI_PHYSICAL_ADDRESS Address
;
712 if (!mAcpiS3Enable
) {
716 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
717 if (GuidHob
== NULL
) {
720 "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
722 &gEfiAcpiVariableGuid
726 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
728 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
729 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
731 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
732 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
734 mSmmS3ResumeState
= SmmS3ResumeState
;
735 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
737 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
739 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
740 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
741 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
742 SmmS3ResumeState
->SmmS3StackSize
= 0;
745 SmmS3ResumeState
->SmmS3Cr0
= mSmmCr0
;
746 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
747 SmmS3ResumeState
->SmmS3Cr4
= mSmmCr4
;
749 if (sizeof (UINTN
) == sizeof (UINT64
)) {
750 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
752 if (sizeof (UINTN
) == sizeof (UINT32
)) {
753 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
758 // Patch SmmS3ResumeState->SmmS3Cr3
763 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
764 // protected mode on S3 path
766 Address
= BASE_4GB
- 1;
767 Status
= gBS
->AllocatePages (
770 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
773 ASSERT_EFI_ERROR (Status
);
774 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
778 Copy register table from ACPI NVS memory into SMRAM.
780 @param[in] DestinationRegisterTableList Points to destination register table.
781 @param[in] SourceRegisterTableList Points to source register table.
782 @param[in] NumberOfCpus Number of CPUs.
787 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
788 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
789 IN UINT32 NumberOfCpus
794 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
796 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
797 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
798 if (DestinationRegisterTableList
[Index
].AllocatedSize
!= 0) {
799 RegisterTableEntry
= AllocateCopyPool (
800 DestinationRegisterTableList
[Index
].AllocatedSize
,
801 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
803 ASSERT (RegisterTableEntry
!= NULL
);
804 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
806 // Go though all MSRs in register table to initialize MSR spin lock
808 for (Index1
= 0; Index1
< DestinationRegisterTableList
[Index
].TableLength
; Index1
++, RegisterTableEntry
++) {
809 if ((RegisterTableEntry
->RegisterType
== Msr
) && (RegisterTableEntry
->ValidBitLength
< 64)) {
811 // Initialize MSR spin lock only for those MSRs need bit field writing
813 InitMsrSpinLockByIndex (RegisterTableEntry
->Index
);
829 ACPI_CPU_DATA
*AcpiCpuData
;
830 IA32_DESCRIPTOR
*Gdtr
;
831 IA32_DESCRIPTOR
*Idtr
;
834 VOID
*MachineCheckHandlerForAp
;
836 if (!mAcpiS3Enable
) {
841 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
843 mAcpiCpuData
.NumberOfCpus
= 0;
846 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
848 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
849 if (AcpiCpuData
== 0) {
854 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
856 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
858 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
859 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
861 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
863 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
864 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
866 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
868 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
869 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
871 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
873 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
874 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
877 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
878 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
879 mAcpiCpuData
.NumberOfCpus
882 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
883 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
886 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
887 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
888 mAcpiCpuData
.NumberOfCpus
892 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
894 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
895 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
897 GdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
898 ASSERT (GdtForAp
!= NULL
);
899 IdtForAp
= (VOID
*) ((UINTN
)GdtForAp
+ (Gdtr
->Limit
+ 1));
900 MachineCheckHandlerForAp
= (VOID
*) ((UINTN
)IdtForAp
+ (Idtr
->Limit
+ 1));
902 CopyMem (GdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
903 CopyMem (IdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
904 CopyMem (MachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
906 Gdtr
->Base
= (UINTN
)GdtForAp
;
907 Idtr
->Base
= (UINTN
)IdtForAp
;
908 mAcpiCpuData
.ApMachineCheckHandlerBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)MachineCheckHandlerForAp
;
912 Get ACPI S3 enable flag.
916 GetAcpiS3EnableFlag (
920 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);