/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
#include "PiSmmCpuDxeSmm.h"
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8             *RendezvousFunnelAddress;
  UINTN             PModeEntryOffset;
  UINTN             FlatJumpOffset;
  UINTN             Size;
  UINTN             LModeEntryOffset;
  UINTN             LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;
//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };
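
//
// Note: mApHltLoopCodeTemplate is the small IA32 stub each AP executes once it leaves the
// rendezvous code on the S3 path. It loads the pointer passed on its stack (the shared
// mNumberToFinish counter), atomically decrements it so the BSP can observe that this AP has
// reached safe code, and then parks in a cli/hlt loop. TransferApToSafeState() transfers the
// AP to the copy of this stub placed in ACPI NVS memory (mApHltLoopCode).
//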
/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}
/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
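
//
// Note: the MSR spin locks are carved out of the semaphore block reserved in
// mSmmCpuSemaphores.SemaphoreMsr, one lock every mSemaphoreSize bytes. mMsrSpinLocks maps an
// MSR index to its lock so that all processors performing a bit-field write to the same
// package- or core-scope MSR serialize on one lock. When the pre-allocated locks run out, an
// extra 4 KB page is allocated and the lookup table is grown to cover it.
//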
/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
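
//
// Note: MtrrSetAllMtrrs() (MtrrLib) programs the fixed MTRRs, the variable MTRRs, and the
// MTRR default-type register from the single MTRR_SETTINGS snapshot captured during normal
// boot, so every processor that calls LoadMtrrData() ends up with an identical memory cache
// map after S3 resume.
//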
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT signal,
      // there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSR programming issues in parallel execution
        // mode and to make sure the MSR read-modify-write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}
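
//
// Note: the BSP sets mNumberToFinish to the number of APs before sending the INIT-SIPI-SIPI
// sequence (see EarlyInitializeCpu below); every AP entering through the startup vector runs
// this procedure, and the InterlockedDecrement() above is what releases the BSP from its
// wait loop.
//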
/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Place AP into the safe code, and count down the number with the lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
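
//
// Note: unlike the early rendezvous, the APs do not decrement mNumberToFinish here directly.
// They are first moved into a private copy of mApHltLoopCodeTemplate in ACPI NVS memory
// (mApHltLoopCode, allocated in InitSmmS3ResumeState) and perform the count-down from that
// safe hlt loop, so that when the OS resumes it never finds an AP executing from memory it
// may reclaim.
//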
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
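
//
// Note: after PrepareApStartupVector() the working buffer below 1 MB is laid out roughly as
// follows (all offsets come from the assembly rendezvous code via AsmGetAddressMap()):
//
//   StartupVector + 0                    rendezvous code copied from RendezvousFunnelAddress
//   StartupVector + FlatJumpOffset + 3   patched 32-bit target = StartupVector + PModeEntryOffset
//   StartupVector + LongJumpOffset + 2   patched 32-bit target = StartupVector + LModeEntryOffset
//   StartupVector + Size                 MP_CPU_EXCHANGE_INFO shared between BSP and APs
//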
/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackStart to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
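
//
// Note: of the two routines above, EarlyInitializeCpu() runs before SMBASE relocation and
// replays only the MTRRs and the PreSmmInit register table (first-time microcode load),
// while InitializeCpu() runs after relocation and replays the full per-processor register
// table; both wake the APs with the same INIT-SIPI-SIPI sequence through the startup vector
// prepared by PrepareApStartupVector().
//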
/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
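
//
// Note: the SMM_S3_RESUME_STATE structure is placed at the start of the SMRAM region
// described by the gEfiAcpiVariableGuid HOB, which is how the S3 resume PEIM locates it on
// the next wake; the Signature field records whether this driver was built as IA32 or X64 so
// SmmRestoreCpu() knows how to transfer control back to PEI.
//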
/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize MSR spin lock only for those MSRs that need bit field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}
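
//
// Note: CopyRegisterTable() deep-copies each per-CPU table, i.e. the CPU_REGISTER_TABLE
// array itself plus every RegisterTableEntry buffer it points to, so the S3 path never has
// to dereference the ACPI NVS originals. It also creates the MSR spin locks up front,
// presumably because SetProcessorRegister() later runs on all processors in parallel and
// cannot allocate locks on demand at that point.
//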
/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}
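
//
// Note: everything reachable from PcdCpuS3DataAddress lives in ACPI NVS memory that is
// visible to the OS at runtime, so GetAcpiCpuData() appears intended to snapshot all of it
// (MTRR settings, GDTR/IDTR, both register tables, and the AP GDT/IDT/machine-check handler
// images) into SMRAM while the data is still trusted; the S3 resume code then consumes only
// the SMRAM copies.
//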
/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}