2 Code for Processor S3 restoration
4 Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 #include "PiSmmCpuDxeSmm.h"
// NOTE(review): this extraction dropped source lines (embedded original line
// numbers jump, e.g. 23 -> 26); the "typedef struct {" openers for the two
// structures below are among the missing lines.  Code text kept byte-identical;
// comments only added.
//
// Exchange data between BSP and AP during AP startup; the visible fields are
// the GDT/IDT descriptors handed to the APs.
22 IA32_DESCRIPTOR GdtrProfile
;
23 IA32_DESCRIPTOR IdtrProfile
;
26 } MP_CPU_EXCHANGE_INFO
;
// Address map of the AP rendezvous (startup) assembly code: its base address
// plus the offsets of the protected-mode and long-mode entry points.
29 UINT8
*RendezvousFunnelAddress
;
30 UINTN PModeEntryOffset
;
33 UINTN LModeEntryOffset
;
35 } MP_ASSEMBLY_ADDRESS_MAP
;
38 // Spin lock used to serialize MemoryMapped operation
40 SPIN_LOCK
*mMemoryMappedLock
= NULL
;
43 // Signal that SMM BASE relocation is complete.
45 volatile BOOLEAN mInitApsAfterSmmBaseReloc
;
// Doc comment for the AsmGetAddressMap() declaration; the "/**"/"**/" markers
// and the return-type/EFIAPI lines are missing from this extraction.
48 Get starting address and size of the rendezvous entry for APs.
49 Information for fixing a jump instruction in the code is also returned.
51 @param AddressMap Output buffer for address map information.
56 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
// Legacy region used for the real-mode AP startup vector: two 4KB pages
// ending at 0xA0000 (just below legacy video memory).
59 #define LEGACY_REGION_SIZE (2 * 0x1000)
60 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
// Module-global state for the S3 resume path.  Comments only added; the
// extraction is missing some lines (original numbering jumps 70 -> 76).
//
// SMRAM copy of the CPU S3 data published via PcdCpuS3DataAddress.
62 ACPI_CPU_DATA mAcpiCpuData
;
// Count of APs still working; decremented via InterlockedDecrement.
63 volatile UINT32 mNumberToFinish
;
64 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
65 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
// SMRAM copies of the AP GDT, IDT and machine-check handler (see
// GetAcpiCpuData / PrepareApStartupVector).
66 VOID
*mGdtForAp
= NULL
;
67 VOID
*mIdtForAp
= NULL
;
68 VOID
*mMachineCheckHandlerForAp
= NULL
;
// Dynamically grown table of per-MSR spin locks (see InitMsrSpinLockByIndex).
69 MP_MSR_LOCK
*mMsrSpinLocks
= NULL
;
70 UINTN mMsrSpinLockCount
;
76 BOOLEAN mSmmS3Flag
= FALSE
;
79 // Pointer to structure used during S3 Resume
81 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
83 BOOLEAN mAcpiS3Enable
= TRUE
;
// Safe hlt-loop code for APs, allocated below 4GB in InitSmmS3ResumeState and
// filled from the template below (IA32 machine code).
85 UINT8
*mApHltLoopCode
= NULL
;
86 UINT8 mApHltLoopCodeTemplate
[] = {
87 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
88 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
// Look up the spin lock registered for a given MSR index by linear scan over
// mMsrSpinLocks[0..mMsrCount).  NOTE(review): the function signature lines and
// the not-found fall-through (presumably "return NULL;" — confirm against the
// canonical source) are missing from this extraction.
95 Get MSR spin lock by MSR index.
97 @param MsrIndex MSR index value.
99 @return Pointer to MSR spin lock.
103 GetMsrSpinLockByIndex (
108 for (Index
= 0; Index
< mMsrCount
; Index
++) {
109 if (MsrIndex
== mMsrSpinLocks
[Index
].MsrIndex
) {
110 return mMsrSpinLocks
[Index
].SpinLock
;
// Register a spin lock for the given MSR index, lazily allocating the lock
// table from the semaphore pool on first use and enlarging it when full.
// NOTE(review): extraction is missing lines (locals such as Index/AddedSize,
// the third ReallocatePool argument, closing braces, mMsrCount increment).
117 Initialize MSR spin lock by MSR index.
119 @param MsrIndex MSR index value.
123 InitMsrSpinLockByIndex (
127 UINTN MsrSpinLockCount
;
128 UINTN NewMsrSpinLockCount
;
// First call: carve the lock table out of the pre-allocated MSR semaphore
// area and mark every slot free with sentinel index (UINT32)-1.
132 if (mMsrSpinLocks
== NULL
) {
133 MsrSpinLockCount
= mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
;
134 mMsrSpinLocks
= (MP_MSR_LOCK
*) AllocatePool (sizeof (MP_MSR_LOCK
) * MsrSpinLockCount
);
135 ASSERT (mMsrSpinLocks
!= NULL
);
136 for (Index
= 0; Index
< MsrSpinLockCount
; Index
++) {
137 mMsrSpinLocks
[Index
].SpinLock
=
138 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+ Index
* mSemaphoreSize
);
139 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
141 mMsrSpinLockCount
= MsrSpinLockCount
;
142 mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
= 0;
// Only add the MSR if it is not already registered.
144 if (GetMsrSpinLockByIndex (MsrIndex
) == NULL
) {
146 // Initialize spin lock for MSR programming
148 mMsrSpinLocks
[mMsrCount
].MsrIndex
= MsrIndex
;
149 InitializeSpinLock (mMsrSpinLocks
[mMsrCount
].SpinLock
);
151 if (mMsrCount
== mMsrSpinLockCount
) {
153 // If MSR spin lock buffer is full, enlarge it
// Grow the semaphore backing store by 4KB and the lock table accordingly.
155 AddedSize
= SIZE_4KB
;
156 mSmmCpuSemaphores
.SemaphoreMsr
.Msr
=
157 AllocatePages (EFI_SIZE_TO_PAGES(AddedSize
));
158 ASSERT (mSmmCpuSemaphores
.SemaphoreMsr
.Msr
!= NULL
);
159 NewMsrSpinLockCount
= mMsrSpinLockCount
+ AddedSize
/ mSemaphoreSize
;
160 mMsrSpinLocks
= ReallocatePool (
161 sizeof (MP_MSR_LOCK
) * mMsrSpinLockCount
,
162 sizeof (MP_MSR_LOCK
) * NewMsrSpinLockCount
,
165 ASSERT (mMsrSpinLocks
!= NULL
);
166 mMsrSpinLockCount
= NewMsrSpinLockCount
;
// Point the new slots at the new semaphore pages and mark them free.
167 for (Index
= mMsrCount
; Index
< mMsrSpinLockCount
; Index
++) {
168 mMsrSpinLocks
[Index
].SpinLock
=
169 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+
170 (Index
- mMsrCount
) * mSemaphoreSize
);
171 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
// LoadMtrrData(): load the saved fixed/variable MTRR settings onto the
// calling processor via MtrrSetAllMtrrs().  NOTE(review): the function-name
// and return-type lines are missing from this extraction; only the parameter,
// locals and body statements are visible.
178 Sync up the MTRR values for all processors.
180 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
185 EFI_PHYSICAL_ADDRESS MtrrTable
191 Sync up the MTRR values for all processors.
200 MTRR_SETTINGS
*MtrrSettings
;
// MtrrTable is a physical address saved in ACPI NVS; convert to pointer.
202 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
203 MtrrSetAllMtrrs (MtrrSettings
);
// Walk the register table whose InitialApicId matches this processor and
// program each entry: control registers, MSRs (bit-field writes under a
// per-MSR spin lock), memory-mapped registers, and cache enable/disable.
// NOTE(review): extraction is missing many lines — the case labels inside the
// control-register switch, the AsmWriteCrX store after each BitFieldWrite64,
// break statements, and the function tail.  Code kept byte-identical.
207 Programs registers for the calling processor.
209 This function programs registers for the calling processor.
211 @param RegisterTables Pointer to register table of the running processor.
212 @param RegisterTableCount Register table count.
216 SetProcessorRegister (
217 IN CPU_REGISTER_TABLE
*RegisterTables
,
218 IN UINTN RegisterTableCount
221 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
224 SPIN_LOCK
*MsrSpinLock
;
226 CPU_REGISTER_TABLE
*RegisterTable
;
// Select the table matching this processor's initial APIC ID.
228 InitApicId
= GetInitialApicId ();
229 for (Index
= 0; Index
< RegisterTableCount
; Index
++) {
230 if (RegisterTables
[Index
].InitialApicId
== InitApicId
) {
231 RegisterTable
= &RegisterTables
[Index
];
237 // Traverse Register Table of this logical processor
239 RegisterTableEntry
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
240 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++, RegisterTableEntry
++) {
242 // Check the type of specified register
244 switch (RegisterTableEntry
->RegisterType
) {
246 // The specified register is Control Register
248 case ControlRegister
:
249 switch (RegisterTableEntry
->Index
) {
// Read-modify-write of CR0 over [ValidBitStart, ValidBitStart+Length-1].
251 Value
= AsmReadCr0 ();
252 Value
= (UINTN
) BitFieldWrite64 (
254 RegisterTableEntry
->ValidBitStart
,
255 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
256 (UINTN
) RegisterTableEntry
->Value
// Same pattern for CR2.
261 Value
= AsmReadCr2 ();
262 Value
= (UINTN
) BitFieldWrite64 (
264 RegisterTableEntry
->ValidBitStart
,
265 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
266 (UINTN
) RegisterTableEntry
->Value
// Same pattern for CR3.
271 Value
= AsmReadCr3 ();
272 Value
= (UINTN
) BitFieldWrite64 (
274 RegisterTableEntry
->ValidBitStart
,
275 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
276 (UINTN
) RegisterTableEntry
->Value
// Same pattern for CR4.
281 Value
= AsmReadCr4 ();
282 Value
= (UINTN
) BitFieldWrite64 (
284 RegisterTableEntry
->ValidBitStart
,
285 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
286 (UINTN
) RegisterTableEntry
->Value
295 // The specified register is Model Specific Register
299 // If this function is called to restore register setting after INIT signal,
300 // there is no need to restore MSRs in register table.
302 if (RegisterTableEntry
->ValidBitLength
>= 64) {
304 // If length is not less than 64 bits, then directly write without reading
307 RegisterTableEntry
->Index
,
308 RegisterTableEntry
->Value
312 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
313 // to make sure MSR read/write operation is atomic.
315 MsrSpinLock
= GetMsrSpinLockByIndex (RegisterTableEntry
->Index
);
316 AcquireSpinLock (MsrSpinLock
);
318 // Set the bit section according to bit start and length
320 AsmMsrBitFieldWrite64 (
321 RegisterTableEntry
->Index
,
322 RegisterTableEntry
->ValidBitStart
,
323 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
324 RegisterTableEntry
->Value
326 ReleaseSpinLock (MsrSpinLock
);
330 // MemoryMapped operations
// MMIO bit-field write serialized by mMemoryMappedLock; 64-bit address is
// composed from Index (low 32) and HighIndex (high 32).
333 AcquireSpinLock (mMemoryMappedLock
);
334 MmioBitFieldWrite32 (
335 (UINTN
)(RegisterTableEntry
->Index
| LShiftU64 (RegisterTableEntry
->HighIndex
, 32)),
336 RegisterTableEntry
->ValidBitStart
,
337 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
338 (UINT32
)RegisterTableEntry
->Value
340 ReleaseSpinLock (mMemoryMappedLock
);
343 // Enable or disable cache
347 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
349 if (RegisterTableEntry
->Value
== 0) {
// AP entry point on S3 resume: restore MTRRs and pre-SMM-init registers,
// check in with the BSP, wait for SMBASE relocation to finish, restore the
// full register table, then park in the safe hlt loop.  NOTE(review): the
// function signature and local declarations (Stack, TopOfStack) are missing
// from this extraction.
363 AP initialization before then after SMBASE relocation in the S3 boot path.
373 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
375 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
378 // Count down the number with lock mechanism.
380 InterlockedDecrement (&mNumberToFinish
);
383 // Wait for BSP to signal SMM Base relocation done.
385 while (!mInitApsAfterSmmBaseReloc
) {
389 ProgramVirtualWireMode ();
390 DisableLvtInterrupts ();
392 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
395 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
// Align the local stack top down to CPU_STACK_ALIGNMENT before handing it
// to the relocated hlt-loop code.
397 TopOfStack
= (UINTN
) Stack
+ sizeof (Stack
);
398 TopOfStack
&= ~(UINTN
) (CPU_STACK_ALIGNMENT
- 1);
399 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
400 TransferApToSafeState ((UINTN
)mApHltLoopCode
, TopOfStack
, (UINTN
)&mNumberToFinish
);
// Build the real-mode AP startup vector in WorkingBuffer: copy the rendezvous
// code, patch its mode-switch jump targets, and populate the BSP/AP exchange
// area that immediately follows the code.  NOTE(review): some lines are
// missing from this extraction (closing braces, possibly more exchange-info
// field assignments).
404 Prepares startup vector for APs.
406 This function prepares startup vector for APs.
408 @param WorkingBuffer The address of the work buffer.
411 PrepareApStartupVector (
412 EFI_PHYSICAL_ADDRESS WorkingBuffer
415 EFI_PHYSICAL_ADDRESS StartupVector
;
416 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
419 // Get the address map of startup code for AP,
420 // including code size, and offset of long jump instructions to redirect.
422 ZeroMem (&AddressMap
, sizeof (AddressMap
));
423 AsmGetAddressMap (&AddressMap
);
425 StartupVector
= WorkingBuffer
;
428 // Copy AP startup code to startup vector, and then redirect the long jump
429 // instructions for mode switching.
431 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
// Patch the 32-bit immediate of the flat jump (+3 skips the opcode bytes).
432 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
// LongJumpOffset is 0 when the funnel code has no long-mode jump to patch.
433 if (AddressMap
.LongJumpOffset
!= 0) {
434 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
438 // Get the start address of exchange data between BSP and AP.
440 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
441 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
443 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
444 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
447 // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
449 CopyMem ((VOID
*) mExchangeInfo
->GdtrProfile
.Base
, mGdtForAp
, mExchangeInfo
->GdtrProfile
.Limit
+ 1);
450 CopyMem ((VOID
*) mExchangeInfo
->IdtrProfile
.Base
, mIdtForAp
, mExchangeInfo
->IdtrProfile
.Limit
+ 1);
451 CopyMem ((VOID
*)(UINTN
) mAcpiCpuData
.ApMachineCheckHandlerBase
, mMachineCheckHandlerForAp
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
// Hand the APs their stack, the vector base, and the BSP's CR3.
453 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
454 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
455 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
456 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
// BSP-side S3 flow before SMBASE relocation: restore MTRRs and pre-SMM-init
// registers on the BSP, prepare the AP startup vector, wake all APs with
// INIT-SIPI-SIPI, and spin until every AP has checked in.
460 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
462 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
463 and restores MTRRs for both BSP and APs.
467 InitializeCpuBeforeRebase (
471 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
473 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
, mAcpiCpuData
.NumberOfCpus
);
475 ProgramVirtualWireMode ();
477 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
// All processors except the BSP must report in.
479 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
480 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) InitializeAp
;
483 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
485 mInitApsAfterSmmBaseReloc
= FALSE
;
488 // Send INIT IPI - SIPI to all APs
490 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
// Busy-wait until every AP decrements mNumberToFinish.
492 while (mNumberToFinish
> 0) {
// BSP-side S3 flow after SMBASE relocation: restore the full register table
// on the BSP, release the waiting APs via mInitApsAfterSmmBaseReloc, and spin
// until they all reach the safe hlt loop.
498 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
500 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
501 data saved by normal boot path for both BSP and APs.
505 InitializeCpuAfterRebase (
509 SetProcessorRegister ((CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
, mAcpiCpuData
.NumberOfCpus
);
511 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
514 // Signal that SMM base relocation is complete and to continue initialization.
516 mInitApsAfterSmmBaseReloc
= TRUE
;
518 while (mNumberToFinish
> 0) {
// On first SMI after S3 resume, refresh gSmst from the private core entry
// context and re-apply the SMM Code Access Check configuration; no-op when
// ACPI S3 is disabled or the restore flag is clear.
524 Restore SMM Configuration in S3 boot path.
528 RestoreSmmConfigurationInS3 (
532 if (!mAcpiS3Enable
) {
537 // Restore SMM Configuration in S3 boot path.
539 if (mRestoreSmmConfigurationInS3
) {
541 // Need make sure gSmst is correct because below function may use them.
543 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
544 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
545 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
546 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
547 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
550 // Configure SMM Code Access Check feature if available.
552 ConfigSmmCodeAccessCheck ();
554 SmmCpuFeaturesCompleteSmmReadyToLock ();
// One-shot: clear the flag so subsequent SMIs skip this restore.
556 mRestoreSmmConfigurationInS3
= FALSE
;
// S3 resume entry (mSmmS3ResumeState->SmmS3ResumeEntryPoint): restore CPU
// state for BSP and APs, then return to the PEI S3 resume code either via
// SwitchStack() (32-bit SMM) or AsmDisablePaging64() (64-bit SMM).
// NOTE(review): extraction is missing lines — e.g. the SwitchStack()/
// AsmDisablePaging64() call lines themselves, CpuDeadLoop tails, braces.
561 Perform SMM initialization for all processors in the S3 boot path.
563 For a native platform, MP initialization in the S3 boot path is also performed in this function.
571 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
572 IA32_DESCRIPTOR Ia32Idtr
;
573 IA32_DESCRIPTOR X64Idtr
;
574 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
577 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
581 InitializeSpinLock (mMemoryMappedLock
);
584 // See if there is enough context to resume PEI Phase
586 if (mSmmS3ResumeState
== NULL
) {
587 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
591 SmmS3ResumeState
= mSmmS3ResumeState
;
592 ASSERT (SmmS3ResumeState
!= NULL
);
// 64-bit SMM resuming a 32-bit PEI: build a temporary X64 IDT and keep the
// IA32 IDT to restore before dropping out of long mode.
594 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
596 // Save the IA32 IDT Descriptor
598 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
601 // Setup X64 IDT table
603 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
604 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
605 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
606 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
609 // Setup the default exception handler
611 Status
= InitializeCpuExceptionHandlers (NULL
);
612 ASSERT_EFI_ERROR (Status
);
615 // Initialize Debug Agent to support source level debug
617 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
621 // Skip initialization if mAcpiCpuData is not valid
623 if (mAcpiCpuData
.NumberOfCpus
> 0) {
625 // First time microcode load and restore MTRRs
627 InitializeCpuBeforeRebase ();
631 // Restore SMBASE for BSP and all APs
// (SMBASE relocation call is among the lines missing from this extraction.)
636 // Skip initialization if mAcpiCpuData is not valid
638 if (mAcpiCpuData
.NumberOfCpus
> 0) {
640 // Restore MSRs for BSP and all APs
642 InitializeCpuAfterRebase ();
646 // Set a flag to restore SMM configuration in S3 path.
648 mRestoreSmmConfigurationInS3
= TRUE
;
650 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
651 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
652 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
653 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
654 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
657 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
659 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
660 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
663 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
664 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
665 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
666 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
671 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
673 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
674 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
676 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
678 SaveAndSetDebugTimerInterrupt (FALSE
);
680 // Restore IA32 IDT table
682 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
684 SmmS3ResumeState
->ReturnCs
,
685 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
686 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
687 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
688 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
693 // Can not resume PEI Phase
695 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
// Publish the SMM_S3_RESUME_STATE structure (found via the ACPI variable GUID
// HOB) so the PEI S3 path can re-enter SMM: fills entry point, stack, CR0/3/4
// and signature, and allocates the below-4GB AP hlt-loop buffer.
// NOTE(review): extraction is missing lines (e.g. the AllocatePages argument
// lines around original 763-768, early returns, closing braces).
700 Initialize SMM S3 resume state structure used during S3 Resume.
702 @param[in] Cr3 The base address of the page tables to use in SMM.
706 InitSmmS3ResumeState (
711 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
712 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
713 EFI_PHYSICAL_ADDRESS Address
;
716 if (!mAcpiS3Enable
) {
720 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
721 if (GuidHob
!= NULL
) {
722 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
724 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
725 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
// The resume-state structure lives at the start of this SMRAM range.
727 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
728 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
730 mSmmS3ResumeState
= SmmS3ResumeState
;
731 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
733 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
735 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
736 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
// Record a zero size when the stack allocation failed.
737 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
738 SmmS3ResumeState
->SmmS3StackSize
= 0;
741 SmmS3ResumeState
->SmmS3Cr0
= gSmmCr0
;
742 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
743 SmmS3ResumeState
->SmmS3Cr4
= gSmmCr4
;
// Signature records whether this SMM image is 64-bit or 32-bit.
745 if (sizeof (UINTN
) == sizeof (UINT64
)) {
746 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
748 if (sizeof (UINTN
) == sizeof (UINT32
)) {
749 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
754 // Patch SmmS3ResumeState->SmmS3Cr3
759 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
760 // protected mode on S3 path
762 Address
= BASE_4GB
- 1;
763 Status
= gBS
->AllocatePages (
766 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
769 ASSERT_EFI_ERROR (Status
);
770 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
// Deep-copy register tables from ACPI NVS into SMRAM: copies the table array,
// then clones each non-empty entry list with AllocateCopyPool and registers
// an MSR spin lock for every partial-width MSR entry found.
774 Copy register table from ACPI NVS memory into SMRAM.
776 @param[in] DestinationRegisterTableList Points to destination register table.
777 @param[in] SourceRegisterTableList Points to source register table.
778 @param[in] NumberOfCpus Number of CPUs.
783 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
784 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
785 IN UINT32 NumberOfCpus
790 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
792 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
793 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
794 if (DestinationRegisterTableList
[Index
].AllocatedSize
!= 0) {
// Clone this CPU's entry list into SMRAM and repoint the table at it.
795 RegisterTableEntry
= AllocateCopyPool (
796 DestinationRegisterTableList
[Index
].AllocatedSize
,
797 (VOID
*)(UINTN
)SourceRegisterTableList
[Index
].RegisterTableEntry
799 ASSERT (RegisterTableEntry
!= NULL
);
800 DestinationRegisterTableList
[Index
].RegisterTableEntry
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)RegisterTableEntry
;
802 // Go though all MSRs in register table to initialize MSR spin lock
804 for (Index1
= 0; Index1
< DestinationRegisterTableList
[Index
].TableLength
; Index1
++, RegisterTableEntry
++) {
805 if ((RegisterTableEntry
->RegisterType
== Msr
) && (RegisterTableEntry
->ValidBitLength
< 64)) {
807 // Initialize MSR spin lock only for those MSRs need bit field writing
809 InitMsrSpinLockByIndex (RegisterTableEntry
->Index
);
// GetAcpiCpuData(): copy the CPU S3 data published via PcdCpuS3DataAddress
// from ACPI NVS into SMRAM-owned allocations (MTRR table, GDTR/IDTR profiles,
// both register table lists, and the AP GDT/IDT/machine-check handler blob)
// so S3 resume never dereferences unprotected memory.  NOTE(review): the
// function signature line and some statement fragments (e.g. the
// CopyRegisterTable call names around original 869/878) are missing from
// this extraction; code kept byte-identical.
825 ACPI_CPU_DATA
*AcpiCpuData
;
826 IA32_DESCRIPTOR
*Gdtr
;
827 IA32_DESCRIPTOR
*Idtr
;
829 if (!mAcpiS3Enable
) {
834 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
836 mAcpiCpuData
.NumberOfCpus
= 0;
839 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
841 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
842 if (AcpiCpuData
== 0) {
847 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
849 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
// Re-home each pointed-to blob into SMRAM and repoint mAcpiCpuData at it.
851 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
852 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
854 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
856 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
857 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
859 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
861 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
862 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
864 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
// Deep-copy both register table lists (pre-SMM-init and full tables).
866 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
867 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
870 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
871 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
872 mAcpiCpuData
.NumberOfCpus
875 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
876 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
879 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
880 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
881 mAcpiCpuData
.NumberOfCpus
885 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
887 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
888 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
// One allocation holds GDT + IDT + machine-check handler back-to-back;
// mIdtForAp/mMachineCheckHandlerForAp are offsets into it.
890 mGdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
891 ASSERT (mGdtForAp
!= NULL
);
892 mIdtForAp
= (VOID
*) ((UINTN
)mGdtForAp
+ (Gdtr
->Limit
+ 1));
893 mMachineCheckHandlerForAp
= (VOID
*) ((UINTN
)mIdtForAp
+ (Idtr
->Limit
+ 1));
895 CopyMem (mGdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
896 CopyMem (mIdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
897 CopyMem (mMachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
// Cache PcdAcpiS3Enable into mAcpiS3Enable; gates every S3 code path above.
// NOTE(review): return-type/parameter lines and braces are missing from this
// extraction.
901 Get ACPI S3 enable flag.
905 GetAcpiS3EnableFlag (
909 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);