/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
23 IA32_DESCRIPTOR GdtrProfile
;
24 IA32_DESCRIPTOR IdtrProfile
;
27 UINTN InitializeFloatingPointUnitsAddress
;
28 } MP_CPU_EXCHANGE_INFO
;
32 UINT8
*RendezvousFunnelAddress
;
33 UINTN PModeEntryOffset
;
36 UINTN LModeEntryOffset
;
38 } MP_ASSEMBLY_ADDRESS_MAP
;
41 // Spin lock used to serialize MemoryMapped operation
43 SPIN_LOCK
*mMemoryMappedLock
= NULL
;
46 // Signal that SMM BASE relocation is complete.
48 volatile BOOLEAN mInitApsAfterSmmBaseReloc
;
51 Get starting address and size of the rendezvous entry for APs.
52 Information for fixing a jump instruction in the code is also returned.
54 @param AddressMap Output buffer for address map information.
59 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
62 #define LEGACY_REGION_SIZE (2 * 0x1000)
63 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
65 ACPI_CPU_DATA mAcpiCpuData
;
66 volatile UINT32 mNumberToFinish
;
67 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
68 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
69 VOID
*mGdtForAp
= NULL
;
70 VOID
*mIdtForAp
= NULL
;
71 VOID
*mMachineCheckHandlerForAp
= NULL
;
72 MP_MSR_LOCK
*mMsrSpinLocks
= NULL
;
73 UINTN mMsrSpinLockCount
;
79 BOOLEAN mSmmS3Flag
= FALSE
;
82 // Pointer to structure used during S3 Resume
84 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
86 BOOLEAN mAcpiS3Enable
= TRUE
;
88 UINT8
*mApHltLoopCode
= NULL
;
89 UINT8 mApHltLoopCodeTemplate
[] = {
90 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
91 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
98 Get MSR spin lock by MSR index.
100 @param MsrIndex MSR index value.
102 @return Pointer to MSR spin lock.
106 GetMsrSpinLockByIndex (
111 for (Index
= 0; Index
< mMsrCount
; Index
++) {
112 if (MsrIndex
== mMsrSpinLocks
[Index
].MsrIndex
) {
113 return mMsrSpinLocks
[Index
].SpinLock
;
120 Initialize MSR spin lock by MSR index.
122 @param MsrIndex MSR index value.
126 InitMsrSpinLockByIndex (
130 UINTN MsrSpinLockCount
;
131 UINTN NewMsrSpinLockCount
;
135 if (mMsrSpinLocks
== NULL
) {
136 MsrSpinLockCount
= mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
;
137 mMsrSpinLocks
= (MP_MSR_LOCK
*) AllocatePool (sizeof (MP_MSR_LOCK
) * MsrSpinLockCount
);
138 ASSERT (mMsrSpinLocks
!= NULL
);
139 for (Index
= 0; Index
< MsrSpinLockCount
; Index
++) {
140 mMsrSpinLocks
[Index
].SpinLock
=
141 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+ Index
* mSemaphoreSize
);
142 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
144 mMsrSpinLockCount
= MsrSpinLockCount
;
145 mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
= 0;
147 if (GetMsrSpinLockByIndex (MsrIndex
) == NULL
) {
149 // Initialize spin lock for MSR programming
151 mMsrSpinLocks
[mMsrCount
].MsrIndex
= MsrIndex
;
152 InitializeSpinLock (mMsrSpinLocks
[mMsrCount
].SpinLock
);
154 if (mMsrCount
== mMsrSpinLockCount
) {
156 // If MSR spin lock buffer is full, enlarge it
158 AddedSize
= SIZE_4KB
;
159 mSmmCpuSemaphores
.SemaphoreMsr
.Msr
=
160 AllocatePages (EFI_SIZE_TO_PAGES(AddedSize
));
161 ASSERT (mSmmCpuSemaphores
.SemaphoreMsr
.Msr
!= NULL
);
162 NewMsrSpinLockCount
= mMsrSpinLockCount
+ AddedSize
/ mSemaphoreSize
;
163 mMsrSpinLocks
= ReallocatePool (
164 sizeof (MP_MSR_LOCK
) * mMsrSpinLockCount
,
165 sizeof (MP_MSR_LOCK
) * NewMsrSpinLockCount
,
168 ASSERT (mMsrSpinLocks
!= NULL
);
169 mMsrSpinLockCount
= NewMsrSpinLockCount
;
170 for (Index
= mMsrCount
; Index
< mMsrSpinLockCount
; Index
++) {
171 mMsrSpinLocks
[Index
].SpinLock
=
172 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+
173 (Index
- mMsrCount
) * mSemaphoreSize
);
174 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
181 Sync up the MTRR values for all processors.
183 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
188 EFI_PHYSICAL_ADDRESS MtrrTable
194 Sync up the MTRR values for all processors.
203 MTRR_SETTINGS
*MtrrSettings
;
205 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
206 MtrrSetAllMtrrs (MtrrSettings
);
210 Programs registers for the calling processor.
212 This function programs registers for the calling processor.
214 @param RegisterTables Pointer to register table of the running processor.
215 @param RegisterTableCount Register table count.
219 SetProcessorRegister (
220 IN CPU_REGISTER_TABLE
*RegisterTables
,
221 IN UINTN RegisterTableCount
224 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
227 SPIN_LOCK
*MsrSpinLock
;
229 CPU_REGISTER_TABLE
*RegisterTable
;
231 InitApicId
= GetInitialApicId ();
232 RegisterTable
= NULL
;
233 for (Index
= 0; Index
< RegisterTableCount
; Index
++) {
234 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
235 RegisterTable
= &RegisterTables
[Index
];
239 ASSERT (RegisterTable
!= NULL
);
242 // Traverse Register Table of this logical processor
244 RegisterTableEntry
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
245 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++, RegisterTableEntry
++) {
247 // Check the type of specified register
249 switch (RegisterTableEntry
->RegisterType
) {
251 // The specified register is Control Register
253 case ControlRegister
:
254 switch (RegisterTableEntry
->Index
) {
256 Value
= AsmReadCr0 ();
257 Value
= (UINTN
) BitFieldWrite64 (
259 RegisterTableEntry
->ValidBitStart
,
260 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
261 (UINTN
) RegisterTableEntry
->Value
266 Value
= AsmReadCr2 ();
267 Value
= (UINTN
) BitFieldWrite64 (
269 RegisterTableEntry
->ValidBitStart
,
270 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
271 (UINTN
) RegisterTableEntry
->Value
276 Value
= AsmReadCr3 ();
277 Value
= (UINTN
) BitFieldWrite64 (
279 RegisterTableEntry
->ValidBitStart
,
280 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
281 (UINTN
) RegisterTableEntry
->Value
286 Value
= AsmReadCr4 ();
287 Value
= (UINTN
) BitFieldWrite64 (
289 RegisterTableEntry
->ValidBitStart
,
290 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
291 (UINTN
) RegisterTableEntry
->Value
300 // The specified register is Model Specific Register
304 // If this function is called to restore register setting after INIT signal,
305 // there is no need to restore MSRs in register table.
307 if (RegisterTableEntry
->ValidBitLength
>= 64) {
309 // If length is not less than 64 bits, then directly write without reading
312 RegisterTableEntry
->Index
,
313 RegisterTableEntry
->Value
317 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
318 // to make sure MSR read/write operation is atomic.
320 MsrSpinLock
= GetMsrSpinLockByIndex (RegisterTableEntry
->Index
);
321 AcquireSpinLock (MsrSpinLock
);
323 // Set the bit section according to bit start and length
325 AsmMsrBitFieldWrite64 (
326 RegisterTableEntry
->Index
,
327 RegisterTableEntry
->ValidBitStart
,
328 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
329 RegisterTableEntry
->Value
331 ReleaseSpinLock (MsrSpinLock
);
335 // MemoryMapped operations
338 AcquireSpinLock (mMemoryMappedLock
);
339 MmioBitFieldWrite32 (
340 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
341 RegisterTableEntry
->ValidBitStart
,
342 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
343 (UINT32
)RegisterTableEntry
->Value
345 ReleaseSpinLock (mMemoryMappedLock
);
348 // Enable or disable cache
352 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
354 if (RegisterTableEntry
->Value
== 0) {
368 AP initialization before then after SMBASE relocation in the S3 boot path.
378 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
380 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
383 // Count down the number with lock mechanism.
385 InterlockedDecrement (&mNumberToFinish
);
388 // Wait for BSP to signal SMM Base relocation done.
390 while (!mInitApsAfterSmmBaseReloc
) {
394 ProgramVirtualWireMode ();
395 DisableLvtInterrupts ();
397 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
400 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
402 TopOfStack
= (UINTN
) Stack
+ sizeof (Stack
);
403 TopOfStack
&= ~(UINTN
) (CPU_STACK_ALIGNMENT
- 1);
404 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
405 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
409 Prepares startup vector for APs.
411 This function prepares startup vector for APs.
413 @param WorkingBuffer The address of the work buffer.
416 PrepareApStartupVector (
417 EFI_PHYSICAL_ADDRESS WorkingBuffer
420 EFI_PHYSICAL_ADDRESS StartupVector
;
421 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
424 // Get the address map of startup code for AP,
425 // including code size, and offset of long jump instructions to redirect.
427 ZeroMem (&AddressMap
, sizeof (AddressMap
));
428 AsmGetAddressMap (&AddressMap
);
430 StartupVector
= WorkingBuffer
;
433 // Copy AP startup code to startup vector, and then redirect the long jump
434 // instructions for mode switching.
436 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
437 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
438 if (AddressMap
.LongJumpOffset
!= 0) {
439 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
443 // Get the start address of exchange data between BSP and AP.
445 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
446 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
448 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
449 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
452 // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
454 CopyMem ((VOID
*) mExchangeInfo
->GdtrProfile
.Base
, mGdtForAp
, mExchangeInfo
->GdtrProfile
.Limit
+ 1);
455 CopyMem ((VOID
*) mExchangeInfo
->IdtrProfile
.Base
, mIdtForAp
, mExchangeInfo
->IdtrProfile
.Limit
+ 1);
456 CopyMem ((VOID
*)(UINTN
) mAcpiCpuData
.ApMachineCheckHandlerBase
, mMachineCheckHandlerForAp
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
458 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
459 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
460 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
461 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
462 mExchangeInfo
->InitializeFloatingPointUnitsAddress
= (UINTN
)InitializeFloatingPointUnits
;
466 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
468 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
469 and restores MTRRs for both BSP and APs.
473 InitializeCpuBeforeRebase (
477 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
479 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
481 ProgramVirtualWireMode ();
483 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
485 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
486 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) InitializeAp
;
489 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
491 mInitApsAfterSmmBaseReloc
= FALSE
;
494 // Send INIT IPI - SIPI to all APs
496 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
498 while (mNumberToFinish
> 0) {
504 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
506 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
507 data saved by normal boot path for both BSP and APs.
511 InitializeCpuAfterRebase (
515 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
517 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
520 // Signal that SMM base relocation is complete and to continue initialization.
522 mInitApsAfterSmmBaseReloc
= TRUE
;
524 while (mNumberToFinish
> 0) {
530 Restore SMM Configuration in S3 boot path.
534 RestoreSmmConfigurationInS3 (
538 if (!mAcpiS3Enable
) {
543 // Restore SMM Configuration in S3 boot path.
545 if (mRestoreSmmConfigurationInS3
) {
547 // Need make sure gSmst is correct because below function may use them.
549 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
550 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
551 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
552 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
553 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
556 // Configure SMM Code Access Check feature if available.
558 ConfigSmmCodeAccessCheck ();
560 SmmCpuFeaturesCompleteSmmReadyToLock ();
562 mRestoreSmmConfigurationInS3
= FALSE
;
567 Perform SMM initialization for all processors in the S3 boot path.
569 For a native platform, MP initialization in the S3 boot path is also performed in this function.
577 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
578 IA32_DESCRIPTOR Ia32Idtr
;
579 IA32_DESCRIPTOR X64Idtr
;
580 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
583 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
587 InitializeSpinLock (mMemoryMappedLock
);
590 // See if there is enough context to resume PEI Phase
592 if (mSmmS3ResumeState
== NULL
) {
593 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
597 SmmS3ResumeState
= mSmmS3ResumeState
;
598 ASSERT (SmmS3ResumeState
!= NULL
);
600 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
602 // Save the IA32 IDT Descriptor
604 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
607 // Setup X64 IDT table
609 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
610 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
611 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
612 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
615 // Setup the default exception handler
617 Status
= InitializeCpuExceptionHandlers (NULL
);
618 ASSERT_EFI_ERROR (Status
);
621 // Initialize Debug Agent to support source level debug
623 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
627 // Skip initialization if mAcpiCpuData is not valid
629 if (mAcpiCpuData
.NumberOfCpus
> 0) {
631 // First time microcode load and restore MTRRs
633 InitializeCpuBeforeRebase ();
637 // Restore SMBASE for BSP and all APs
642 // Skip initialization if mAcpiCpuData is not valid
644 if (mAcpiCpuData
.NumberOfCpus
> 0) {
646 // Restore MSRs for BSP and all APs
648 InitializeCpuAfterRebase ();
652 // Set a flag to restore SMM configuration in S3 path.
654 mRestoreSmmConfigurationInS3
= TRUE
;
656 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
657 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
658 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
659 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
660 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
663 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
665 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
666 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
669 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
670 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
671 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
672 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
677 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
679 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
680 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
682 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
684 SaveAndSetDebugTimerInterrupt (FALSE
);
686 // Restore IA32 IDT table
688 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
690 SmmS3ResumeState
->ReturnCs
,
691 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
692 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
693 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
694 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
699 // Can not resume PEI Phase
701 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
706 Initialize SMM S3 resume state structure used during S3 Resume.
708 @param[in] Cr3 The base address of the page tables to use in SMM.
712 InitSmmS3ResumeState (
717 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
718 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
719 EFI_PHYSICAL_ADDRESS Address
;
722 if (!mAcpiS3Enable
) {
726 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
727 if (GuidHob
!= NULL
) {
728 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
730 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
731 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
733 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
734 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
736 mSmmS3ResumeState
= SmmS3ResumeState
;
737 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
739 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
741 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
742 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
743 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
744 SmmS3ResumeState
->SmmS3StackSize
= 0;
747 SmmS3ResumeState
->SmmS3Cr0
= mSmmCr0
;
748 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
749 SmmS3ResumeState
->SmmS3Cr4
= mSmmCr4
;
751 if (sizeof (UINTN
) == sizeof (UINT64
)) {
752 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
754 if (sizeof (UINTN
) == sizeof (UINT32
)) {
755 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
760 // Patch SmmS3ResumeState->SmmS3Cr3
765 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
766 // protected mode on S3 path
768 Address
= BASE_4GB
- 1;
769 Status
= gBS
->AllocatePages (
772 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
775 ASSERT_EFI_ERROR (Status
);
776 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
780 Copy register table from ACPI NVS memory into SMRAM.
782 @param[in] DestinationRegisterTableList Points to destination register table.
783 @param[in] SourceRegisterTableList Points to source register table.
784 @param[in] NumberOfCpus Number of CPUs.
789 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
790 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
791 IN UINT32 NumberOfCpus
796 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
798 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
799 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
800 if (DestinationRegisterTableList
[Index
].AllocatedSize
!= 0) {
801 RegisterTableEntry
= AllocateCopyPool (
802 DestinationRegisterTableList
[Index
].AllocatedSize
,
803 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
805 ASSERT (RegisterTableEntry
!= NULL
);
806 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
808 // Go though all MSRs in register table to initialize MSR spin lock
810 for (Index1
= 0; Index1
< DestinationRegisterTableList
[Index
].TableLength
; Index1
++, RegisterTableEntry
++) {
811 if ((RegisterTableEntry
->RegisterType
== Msr
) && (RegisterTableEntry
->ValidBitLength
< 64)) {
813 // Initialize MSR spin lock only for those MSRs need bit field writing
815 InitMsrSpinLockByIndex (RegisterTableEntry
->Index
);
831 ACPI_CPU_DATA
*AcpiCpuData
;
832 IA32_DESCRIPTOR
*Gdtr
;
833 IA32_DESCRIPTOR
*Idtr
;
835 if (!mAcpiS3Enable
) {
840 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
842 mAcpiCpuData
.NumberOfCpus
= 0;
845 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
847 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
848 if (AcpiCpuData
== 0) {
853 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
855 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
857 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
858 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
860 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
862 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
863 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
865 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
867 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
868 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
870 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
872 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
873 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
876 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
877 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
878 mAcpiCpuData
.NumberOfCpus
881 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
882 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
885 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
886 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
887 mAcpiCpuData
.NumberOfCpus
891 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
893 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
894 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
896 mGdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
897 ASSERT (mGdtForAp
!= NULL
);
898 mIdtForAp
= (VOID
*) ((UINTN
)mGdtForAp
+ (Gdtr
->Limit
+ 1));
899 mMachineCheckHandlerForAp
= (VOID
*) ((UINTN
)mIdtForAp
+ (Idtr
->Limit
+ 1));
901 CopyMem (mGdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
902 CopyMem (mIdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
903 CopyMem (mMachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
907 Get ACPI S3 enable flag.
911 GetAcpiS3EnableFlag (
915 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);