2 Code for Processor S3 restoration
4 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 #include "PiSmmCpuDxeSmm.h"
// NOTE(review): garbled extraction — the enclosing "typedef struct {" headers
// and several members are missing here. The fragments below belong to two
// structure definitions: MP_CPU_EXCHANGE_INFO (data exchanged between the BSP
// and APs at the AP startup vector; see PrepareApStartupVector below) and
// MP_ASSEMBLY_ADDRESS_MAP (offsets into the AP rendezvous assembly code; see
// AsmGetAddressMap). Recover the full typedefs from the original source.
22 IA32_DESCRIPTOR GdtrProfile
;
23 IA32_DESCRIPTOR IdtrProfile
;
26 } MP_CPU_EXCHANGE_INFO
;
// Entry point of the AP rendezvous funnel code copied to the startup vector.
29 UINT8
*RendezvousFunnelAddress
;
// Offset of the protected-mode entry within the rendezvous code.
30 UINTN PModeEntryOffset
;
// Offset of the long-mode entry within the rendezvous code.
33 UINTN LModeEntryOffset
;
35 } MP_ASSEMBLY_ADDRESS_MAP
;
38 // Spin lock used to serialize MemoryMapped operation
// Taken in SetProcessorRegister around MmioBitFieldWrite32 so MMIO
// read-modify-write sequences are atomic across processors.
40 SPIN_LOCK
*mMemoryMappedLock
= NULL
;
// NOTE(review): the text below is the doc comment and parameter list of the
// AsmGetAddressMap declaration; the "/** ... **/" comment markers and the
// function name/return type lines were lost in extraction.
43 Get starting address and size of the rendezvous entry for APs.
44 Information for fixing a jump instruction in the code is also returned.
46 @param AddressMap Output buffer for address map information.
51 MP_ASSEMBLY_ADDRESS_MAP
*AddressMap
// 8 KB region immediately below 0xA0000 (top of conventional memory), per the
// arithmetic in the two macros themselves.
54 #define LEGACY_REGION_SIZE (2 * 0x1000)
55 #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
// Copy of the ACPI CPU S3 data, filled from PcdCpuS3DataAddress (see the
// copy routine near the end of this file).
57 ACPI_CPU_DATA mAcpiCpuData
;
// Count of APs still running their rendezvous procedure; decremented with
// InterlockedDecrement by each AP, polled by the BSP.
58 UINT32 mNumberToFinish
;
// BSP/AP exchange area placed right after the AP startup code.
59 MP_CPU_EXCHANGE_INFO
*mExchangeInfo
;
// Set TRUE on S3 resume so RestoreSmmConfigurationInS3 runs once.
60 BOOLEAN mRestoreSmmConfigurationInS3
= FALSE
;
// SMRAM copies of the AP GDT, IDT and machine-check handler, allocated as one
// buffer (see the Gdtr/Idtr copy code below).
61 VOID
*mGdtForAp
= NULL
;
62 VOID
*mIdtForAp
= NULL
;
63 VOID
*mMachineCheckHandlerForAp
= NULL
;
// Growable array of per-MSR spin locks; see InitMsrSpinLockByIndex.
64 MP_MSR_LOCK
*mMsrSpinLocks
= NULL
;
65 UINTN mMsrSpinLockCount
;
71 BOOLEAN mSmmS3Flag
= FALSE
;
74 // Pointer to structure used during S3 Resume
76 SMM_S3_RESUME_STATE
*mSmmS3ResumeState
= NULL
;
// Cached PcdAcpiS3Enable value; see GetAcpiS3EnableFlag at end of file.
78 BOOLEAN mAcpiS3Enable
= TRUE
;
// ACPI NVS buffer below 4GB holding the AP hlt-loop code (see
// InitSmmS3ResumeState and MPRendezvousProcedure).
80 UINT8
*mApHltLoopCode
= NULL
;
// NOTE(review): the initializer bytes of this template and the closing "};"
// were lost in extraction.
81 UINT8 mApHltLoopCodeTemplate
[] = {
88 Get MSR spin lock by MSR index.
90 @param MsrIndex MSR index value.
92 @return Pointer to MSR spin lock.
// NOTE(review): extraction dropped the return type, parameter list, local
// declarations, the fall-through "return NULL;" and the closing braces.
96 GetMsrSpinLockByIndex (
// Linear scan of the first mMsrCount registered entries for a matching index.
101 for (Index
= 0; Index
< mMsrCount
; Index
++) {
102 if (MsrIndex
== mMsrSpinLocks
[Index
].MsrIndex
) {
103 return mMsrSpinLocks
[Index
].SpinLock
;
110 Initialize MSR spin lock by MSR index.
112 @param MsrIndex MSR index value.
// NOTE(review): extraction dropped the return type/parameter list, some local
// declarations (Index, AddedSize), several closing braces, the mMsrCount
// increment, and one ReallocatePool argument line.
116 InitMsrSpinLockByIndex (
120 UINTN MsrSpinLockCount
;
121 UINTN NewMsrSpinLockCount
;
// Lazy first-time allocation: size the lock array from the semaphore block's
// remaining capacity and mark every slot free with the (UINT32)-1 sentinel.
125 if (mMsrSpinLocks
== NULL
) {
126 MsrSpinLockCount
= mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
;
127 mMsrSpinLocks
= (MP_MSR_LOCK
*) AllocatePool (sizeof (MP_MSR_LOCK
) * MsrSpinLockCount
);
128 ASSERT (mMsrSpinLocks
!= NULL
);
129 for (Index
= 0; Index
< MsrSpinLockCount
; Index
++) {
// Each entry's SpinLock points into the pre-allocated semaphore area,
// mSemaphoreSize bytes apart.
130 mMsrSpinLocks
[Index
].SpinLock
=
131 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+ Index
* mSemaphoreSize
);
132 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
134 mMsrSpinLockCount
= MsrSpinLockCount
;
// Mark the semaphore MSR area as fully consumed.
135 mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
= 0;
// Only register the MSR if it does not already have a lock.
137 if (GetMsrSpinLockByIndex (MsrIndex
) == NULL
) {
139 // Initialize spin lock for MSR programming
141 mMsrSpinLocks
[mMsrCount
].MsrIndex
= MsrIndex
;
142 InitializeSpinLock (mMsrSpinLocks
[mMsrCount
].SpinLock
);
144 if (mMsrCount
== mMsrSpinLockCount
) {
146 // If MSR spin lock buffer is full, enlarge it
// Grow the backing semaphore storage by one 4KB page and extend the lock
// array to cover the new capacity.
148 AddedSize
= SIZE_4KB
;
149 mSmmCpuSemaphores
.SemaphoreMsr
.Msr
=
150 AllocatePages (EFI_SIZE_TO_PAGES(AddedSize
));
151 ASSERT (mSmmCpuSemaphores
.SemaphoreMsr
.Msr
!= NULL
);
152 NewMsrSpinLockCount
= mMsrSpinLockCount
+ AddedSize
/ mSemaphoreSize
;
153 mMsrSpinLocks
= ReallocatePool (
154 sizeof (MP_MSR_LOCK
) * mMsrSpinLockCount
,
155 sizeof (MP_MSR_LOCK
) * NewMsrSpinLockCount
,
158 ASSERT (mMsrSpinLocks
!= NULL
);
159 mMsrSpinLockCount
= NewMsrSpinLockCount
;
// Wire the newly added entries into the fresh page and mark them free.
160 for (Index
= mMsrCount
; Index
< mMsrSpinLockCount
; Index
++) {
161 mMsrSpinLocks
[Index
].SpinLock
=
162 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreMsr
.Msr
+
163 (Index
- mMsrCount
) * mSemaphoreSize
);
164 mMsrSpinLocks
[Index
].MsrIndex
= (UINT32
)-1;
171 Sync up the MTRR values for all processors.
173 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
// NOTE(review): the function name and signature lines were lost; callers
// elsewhere in this file invoke this as LoadMtrrData (mAcpiCpuData.MtrrTable),
// so this is presumably LoadMtrrData (EFI_PHYSICAL_ADDRESS MtrrTable).
178 EFI_PHYSICAL_ADDRESS MtrrTable
184 Sync up the MTRR values for all processors.
193 MTRR_SETTINGS
*MtrrSettings
;
// The physical address is reinterpreted as an MTRR_SETTINGS table and applied
// to the calling processor in one shot.
195 MtrrSettings
= (MTRR_SETTINGS
*) (UINTN
) MtrrTable
;
196 MtrrSetAllMtrrs (MtrrSettings
);
200 Programs registers for the calling processor.
202 This function programs registers for the calling processor.
204 @param RegisterTable Pointer to register table of the running processor.
// NOTE(review): extraction dropped the return type, several local declarations
// (Index, Value), case labels for individual CR indexes, the write-back
// instructions (e.g. AsmWriteCr0), the AsmWriteMsr64 call line, break
// statements, the cache enable/disable bodies, and closing braces.
208 SetProcessorRegister (
209 IN CPU_REGISTER_TABLE
*RegisterTable
212 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
215 SPIN_LOCK
*MsrSpinLock
;
218 // Traverse Register Table of this logical processor
220 RegisterTableEntry
= (CPU_REGISTER_TABLE_ENTRY
*) (UINTN
) RegisterTable
->RegisterTableEntry
;
221 for (Index
= 0; Index
< RegisterTable
->TableLength
; Index
++, RegisterTableEntry
++) {
223 // Check the type of specified register
225 switch (RegisterTableEntry
->RegisterType
) {
227 // The specified register is Control Register
229 case ControlRegister
:
230 switch (RegisterTableEntry
->Index
) {
// CR0: read, overwrite only the [ValidBitStart, ValidBitStart+Length-1]
// field, then (write-back line lost in extraction).
232 Value
= AsmReadCr0 ();
233 Value
= (UINTN
) BitFieldWrite64 (
235 RegisterTableEntry
->ValidBitStart
,
236 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
237 (UINTN
) RegisterTableEntry
->Value
// CR2: same read-modify-write pattern.
242 Value
= AsmReadCr2 ();
243 Value
= (UINTN
) BitFieldWrite64 (
245 RegisterTableEntry
->ValidBitStart
,
246 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
247 (UINTN
) RegisterTableEntry
->Value
// CR3: same read-modify-write pattern.
252 Value
= AsmReadCr3 ();
253 Value
= (UINTN
) BitFieldWrite64 (
255 RegisterTableEntry
->ValidBitStart
,
256 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
257 (UINTN
) RegisterTableEntry
->Value
// CR4: same read-modify-write pattern.
262 Value
= AsmReadCr4 ();
263 Value
= (UINTN
) BitFieldWrite64 (
265 RegisterTableEntry
->ValidBitStart
,
266 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
267 (UINTN
) RegisterTableEntry
->Value
276 // The specified register is Model Specific Register
280 // If this function is called to restore register setting after INIT signal,
281 // there is no need to restore MSRs in register table.
283 if (RegisterTableEntry
->ValidBitLength
>= 64) {
285 // If length is not less than 64 bits, then directly write without reading
// (The AsmWriteMsr64 call line itself was lost; only its arguments remain.)
288 RegisterTableEntry
->Index
,
289 RegisterTableEntry
->Value
293 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
294 // to make sure MSR read/write operation is atomic.
296 MsrSpinLock
= GetMsrSpinLockByIndex (RegisterTableEntry
->Index
);
297 AcquireSpinLock (MsrSpinLock
);
299 // Set the bit section according to bit start and length
301 AsmMsrBitFieldWrite64 (
302 RegisterTableEntry
->Index
,
303 RegisterTableEntry
->ValidBitStart
,
304 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
305 RegisterTableEntry
->Value
307 ReleaseSpinLock (MsrSpinLock
);
311 // MemoryMapped operations
// MMIO read-modify-write is serialized across processors by
// mMemoryMappedLock.
314 AcquireSpinLock (mMemoryMappedLock
);
315 MmioBitFieldWrite32 (
316 RegisterTableEntry
->Index
,
317 RegisterTableEntry
->ValidBitStart
,
318 RegisterTableEntry
->ValidBitStart
+ RegisterTableEntry
->ValidBitLength
- 1,
319 (UINT32
)RegisterTableEntry
->Value
321 ReleaseSpinLock (mMemoryMappedLock
);
324 // Enable or disable cache
328 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
330 if (RegisterTableEntry
->Value
== 0) {
344 AP initialization before SMBASE relocation in the S3 boot path.
// Runs on each AP: loads MTRRs, applies this AP's pre-SMM-init register
// table (matched by initial APIC ID), then signals completion to the BSP.
// NOTE(review): extraction dropped the VOID/EFIAPI lines, locals (Index,
// InitApicId), loop-exit break, and closing braces.
347 EarlyMPRendezvousProcedure (
351 CPU_REGISTER_TABLE
*RegisterTableList
;
355 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
358 // Find processor number for this CPU.
360 RegisterTableList
= (CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
;
361 InitApicId
= GetInitialApicId ();
362 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
363 if (RegisterTableList
[Index
].InitialApicId
== InitApicId
) {
364 SetProcessorRegister (&RegisterTableList
[Index
]);
370 // Count down the number with lock mechanism.
372 InterlockedDecrement (&mNumberToFinish
);
376 AP initialization after SMBASE relocation in the S3 boot path.
// Runs on each AP after SMBASE relocation: programs virtual wire mode,
// applies this AP's register table, signals the BSP, then parks itself in
// the hlt-loop code placed in safe (ACPI NVS) memory.
// NOTE(review): extraction dropped the VOID/EFIAPI lines, locals (Index,
// InitApicId, TopOfStack, Stack array), and closing braces.
379 MPRendezvousProcedure (
383 CPU_REGISTER_TABLE
*RegisterTableList
;
389 ProgramVirtualWireMode ();
390 DisableLvtInterrupts ();
392 RegisterTableList
= (CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
;
393 InitApicId
= GetInitialApicId ();
394 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
395 if (RegisterTableList
[Index
].InitialApicId
== InitApicId
) {
396 SetProcessorRegister (&RegisterTableList
[Index
]);
402 // Count down the number with lock mechanism.
404 InterlockedDecrement (&mNumberToFinish
);
407 // Place AP into the safe code
// Stack top is aligned down to CPU_STACK_ALIGNMENT before transfer.
409 TopOfStack
= (UINT32
) (UINTN
) Stack
+ sizeof (Stack
);
410 TopOfStack
&= ~(UINT32
) (CPU_STACK_ALIGNMENT
- 1);
411 CopyMem ((VOID
*) (UINTN
) mApHltLoopCode
, mApHltLoopCodeTemplate
, sizeof (mApHltLoopCodeTemplate
));
412 TransferApToSafeState ((UINT32
) (UINTN
) mApHltLoopCode
, TopOfStack
);
416 Prepares startup vector for APs.
418 This function prepares startup vector for APs.
420 @param WorkingBuffer The address of the work buffer.
// Copies the AP rendezvous code to WorkingBuffer, patches its mode-switch
// jumps, and fills the MP_CPU_EXCHANGE_INFO area that follows the code.
// NOTE(review): extraction dropped the VOID return line and closing braces.
423 PrepareApStartupVector (
424 EFI_PHYSICAL_ADDRESS WorkingBuffer
427 EFI_PHYSICAL_ADDRESS StartupVector
;
428 MP_ASSEMBLY_ADDRESS_MAP AddressMap
;
431 // Get the address map of startup code for AP,
432 // including code size, and offset of long jump instructions to redirect.
434 ZeroMem (&AddressMap
, sizeof (AddressMap
));
435 AsmGetAddressMap (&AddressMap
);
437 StartupVector
= WorkingBuffer
;
440 // Copy AP startup code to startup vector, and then redirect the long jump
441 // instructions for mode switching.
443 CopyMem ((VOID
*) (UINTN
) StartupVector
, AddressMap
.RendezvousFunnelAddress
, AddressMap
.Size
);
// Patch the 32-bit jump target operand (at FlatJumpOffset + 3) to the
// protected-mode entry inside the relocated code.
444 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.FlatJumpOffset
+ 3) = (UINT32
) (StartupVector
+ AddressMap
.PModeEntryOffset
);
// LongJumpOffset is 0 when there is no long-mode jump to patch.
445 if (AddressMap
.LongJumpOffset
!= 0) {
446 *(UINT32
*) (UINTN
) (StartupVector
+ AddressMap
.LongJumpOffset
+ 2) = (UINT32
) (StartupVector
+ AddressMap
.LModeEntryOffset
);
450 // Get the start address of exchange data between BSP and AP.
452 mExchangeInfo
= (MP_CPU_EXCHANGE_INFO
*) (UINTN
) (StartupVector
+ AddressMap
.Size
);
453 ZeroMem ((VOID
*) mExchangeInfo
, sizeof (MP_CPU_EXCHANGE_INFO
));
455 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->GdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
456 CopyMem ((VOID
*) (UINTN
) &mExchangeInfo
->IdtrProfile
, (VOID
*) (UINTN
) mAcpiCpuData
.IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
459 // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
461 CopyMem ((VOID
*) mExchangeInfo
->GdtrProfile
.Base
, mGdtForAp
, mExchangeInfo
->GdtrProfile
.Limit
+ 1);
462 CopyMem ((VOID
*) mExchangeInfo
->IdtrProfile
.Base
, mIdtForAp
, mExchangeInfo
->IdtrProfile
.Limit
+ 1);
463 CopyMem ((VOID
*)(UINTN
) mAcpiCpuData
.ApMachineCheckHandlerBase
, mMachineCheckHandlerForAp
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
465 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
466 mExchangeInfo
->StackSize
= mAcpiCpuData
.StackSize
;
467 mExchangeInfo
->BufferStart
= (UINT32
) StartupVector
;
468 mExchangeInfo
->Cr3
= (UINT32
) (AsmReadCr3 ());
472 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
474 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
475 and restores MTRRs for both BSP and APs.
// NOTE(review): the function name/signature lines were lost in extraction;
// per the comment at original line 545 ("waken up in EarlyInitializeCpu")
// this is presumably EarlyInitializeCpu. Locals (Index, InitApicId) and the
// closing braces are also missing.
483 CPU_REGISTER_TABLE
*RegisterTableList
;
487 LoadMtrrData (mAcpiCpuData
.MtrrTable
);
490 // Find processor number for this CPU.
// Apply the BSP's own pre-SMM-init register table, matched by initial APIC ID.
492 RegisterTableList
= (CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.PreSmmInitRegisterTable
;
493 InitApicId
= GetInitialApicId ();
494 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
495 if (RegisterTableList
[Index
].InitialApicId
== InitApicId
) {
496 SetProcessorRegister (&RegisterTableList
[Index
]);
501 ProgramVirtualWireMode ();
503 PrepareApStartupVector (mAcpiCpuData
.StartupVector
);
// Wake all APs into EarlyMPRendezvousProcedure and wait for each of them to
// decrement mNumberToFinish.
505 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
506 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) EarlyMPRendezvousProcedure
;
509 // Send INIT IPI - SIPI to all APs
511 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
513 while (mNumberToFinish
> 0) {
519 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
521 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
522 data saved by normal boot path for both BSP and APs.
// NOTE(review): the function name/signature lines were lost in extraction
// (presumably InitializeCpu — the post-SMBASE counterpart of the routine
// above). Locals (Index, InitApicId) and closing braces are also missing.
530 CPU_REGISTER_TABLE
*RegisterTableList
;
// Apply the BSP's normal-boot register table, matched by initial APIC ID.
534 RegisterTableList
= (CPU_REGISTER_TABLE
*) (UINTN
) mAcpiCpuData
.RegisterTable
;
535 InitApicId
= GetInitialApicId ();
536 for (Index
= 0; Index
< mAcpiCpuData
.NumberOfCpus
; Index
++) {
537 if (RegisterTableList
[Index
].InitialApicId
== InitApicId
) {
538 SetProcessorRegister (&RegisterTableList
[Index
]);
543 mNumberToFinish
= mAcpiCpuData
.NumberOfCpus
- 1;
545 // StackStart was updated when APs were waken up in EarlyInitializeCpu.
546 // Re-initialize StackAddress to original beginning address.
548 mExchangeInfo
->StackStart
= (VOID
*) (UINTN
) mAcpiCpuData
.StackAddress
;
549 mExchangeInfo
->ApFunction
= (VOID
*) (UINTN
) MPRendezvousProcedure
;
552 // Send INIT IPI - SIPI to all APs
554 SendInitSipiSipiAllExcludingSelf ((UINT32
)mAcpiCpuData
.StartupVector
);
// Wait until every AP has decremented mNumberToFinish.
556 while (mNumberToFinish
> 0) {
562 Restore SMM Configuration in S3 boot path.
// Called on S3 resume; no-op unless ACPI S3 is enabled and the
// mRestoreSmmConfigurationInS3 flag was set by SmmRestoreCpu.
// NOTE(review): extraction dropped the VOID return line, early "return;"
// statements, and closing braces.
566 RestoreSmmConfigurationInS3 (
570 if (!mAcpiS3Enable
) {
575 // Restore SMM Configuration in S3 boot path.
577 if (mRestoreSmmConfigurationInS3
) {
579 // Need make sure gSmst is correct because below function may use them.
// Re-sync the gSmst service table fields from the saved SMM core entry
// context before calling functions that rely on them.
581 gSmst
->SmmStartupThisAp
= gSmmCpuPrivate
->SmmCoreEntryContext
.SmmStartupThisAp
;
582 gSmst
->CurrentlyExecutingCpu
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
583 gSmst
->NumberOfCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
584 gSmst
->CpuSaveStateSize
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveStateSize
;
585 gSmst
->CpuSaveState
= gSmmCpuPrivate
->SmmCoreEntryContext
.CpuSaveState
;
588 // Configure SMM Code Access Check feature if available.
590 ConfigSmmCodeAccessCheck ();
592 SmmCpuFeaturesCompleteSmmReadyToLock ();
// One-shot: clear the flag so this restore runs only once per resume.
594 mRestoreSmmConfigurationInS3
= FALSE
;
599 Perform SMM initialization for all processors in the S3 boot path.
601 For a native platform, MP initialization in the S3 boot path is also performed in this function.
// S3-resume entry point (registered in InitSmmS3ResumeState below). Restores
// CPU state for BSP/APs, then returns to the PEI phase either via
// SwitchStack (32-bit) or AsmDisablePaging64 (64-bit).
// NOTE(review): extraction dropped the function name/signature, some locals
// (Status), the SwitchStack/AsmDisablePaging64 call lines (only their
// argument lists remain), CpuDeadLoop, and closing braces.
609 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
610 IA32_DESCRIPTOR Ia32Idtr
;
611 IA32_DESCRIPTOR X64Idtr
;
612 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable
[EXCEPTION_VECTOR_NUMBER
];
615 DEBUG ((EFI_D_INFO
, "SmmRestoreCpu()\n"));
619 InitializeSpinLock (mMemoryMappedLock
);
622 // See if there is enough context to resume PEI Phase
624 if (mSmmS3ResumeState
== NULL
) {
625 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
629 SmmS3ResumeState
= mSmmS3ResumeState
;
630 ASSERT (SmmS3ResumeState
!= NULL
);
// 64-bit SMM resuming a 32-bit PEI needs a temporary X64 IDT plus the saved
// IA32 IDT for the switch back.
632 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
634 // Save the IA32 IDT Descriptor
636 AsmReadIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
639 // Setup X64 IDT table
641 ZeroMem (IdtEntryTable
, sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32);
642 X64Idtr
.Base
= (UINTN
) IdtEntryTable
;
643 X64Idtr
.Limit
= (UINT16
) (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32 - 1);
644 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &X64Idtr
);
647 // Setup the default exception handler
649 Status
= InitializeCpuExceptionHandlers (NULL
);
650 ASSERT_EFI_ERROR (Status
);
653 // Initialize Debug Agent to support source level debug
655 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64
, (VOID
*)&Ia32Idtr
, NULL
);
659 // Skip initialization if mAcpiCpuData is not valid
661 if (mAcpiCpuData
.NumberOfCpus
> 0) {
663 // First time microcode load and restore MTRRs
665 EarlyInitializeCpu ();
669 // Restore SMBASE for BSP and all APs
// (The SmmRelocateBases call line was lost in extraction.)
674 // Skip initialization if mAcpiCpuData is not valid
676 if (mAcpiCpuData
.NumberOfCpus
> 0) {
678 // Restore MSRs for BSP and all APs
// (The InitializeCpu call line was lost in extraction.)
684 // Set a flag to restore SMM configuration in S3 path.
686 mRestoreSmmConfigurationInS3
= TRUE
;
688 DEBUG (( EFI_D_INFO
, "SMM S3 Return CS = %x\n", SmmS3ResumeState
->ReturnCs
));
689 DEBUG (( EFI_D_INFO
, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState
->ReturnEntryPoint
));
690 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState
->ReturnContext1
));
691 DEBUG (( EFI_D_INFO
, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState
->ReturnContext2
));
692 DEBUG (( EFI_D_INFO
, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState
->ReturnStackPointer
));
695 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
697 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_32
) {
698 DEBUG ((EFI_D_INFO
, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
// (The SwitchStack ( call line was lost; these are its arguments.)
701 (SWITCH_STACK_ENTRY_POINT
)(UINTN
)SmmS3ResumeState
->ReturnEntryPoint
,
702 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext1
,
703 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnContext2
,
704 (VOID
*)(UINTN
)SmmS3ResumeState
->ReturnStackPointer
709 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
711 if (SmmS3ResumeState
->Signature
== SMM_S3_RESUME_SMM_64
) {
712 DEBUG ((EFI_D_INFO
, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
714 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
716 SaveAndSetDebugTimerInterrupt (FALSE
);
718 // Restore IA32 IDT table
720 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &Ia32Idtr
);
// (The AsmDisablePaging64 ( call line was lost; these are its arguments.)
722 SmmS3ResumeState
->ReturnCs
,
723 (UINT32
)SmmS3ResumeState
->ReturnEntryPoint
,
724 (UINT32
)SmmS3ResumeState
->ReturnContext1
,
725 (UINT32
)SmmS3ResumeState
->ReturnContext2
,
726 (UINT32
)SmmS3ResumeState
->ReturnStackPointer
731 // Can not resume PEI Phase
733 DEBUG ((EFI_D_ERROR
, "No context to return to PEI Phase\n"));
738 Initialize SMM S3 resume state structure used during S3 Resume.
740 @param[in] Cr3 The base address of the page tables to use in SMM.
// Locates the SMRAM descriptor via the ACPI variable GUID HOB, builds the
// SMM_S3_RESUME_STATE at the start of SMRAM, and allocates the below-4GB
// ACPI NVS buffer for the AP hlt loop.
// NOTE(review): extraction dropped the VOID return line, locals (GuidHob,
// Status), early returns, some AllocatePages arguments (memory type,
// &Address), and closing braces.
744 InitSmmS3ResumeState (
749 EFI_SMRAM_DESCRIPTOR
*SmramDescriptor
;
750 SMM_S3_RESUME_STATE
*SmmS3ResumeState
;
751 EFI_PHYSICAL_ADDRESS Address
;
754 if (!mAcpiS3Enable
) {
758 GuidHob
= GetFirstGuidHob (&gEfiAcpiVariableGuid
);
759 if (GuidHob
!= NULL
) {
760 SmramDescriptor
= (EFI_SMRAM_DESCRIPTOR
*) GET_GUID_HOB_DATA (GuidHob
);
762 DEBUG ((EFI_D_INFO
, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor
));
763 DEBUG ((EFI_D_INFO
, "SMM S3 Structure = %x\n", SmramDescriptor
->CpuStart
));
765 SmmS3ResumeState
= (SMM_S3_RESUME_STATE
*)(UINTN
)SmramDescriptor
->CpuStart
;
766 ZeroMem (SmmS3ResumeState
, sizeof (SMM_S3_RESUME_STATE
));
768 mSmmS3ResumeState
= SmmS3ResumeState
;
769 SmmS3ResumeState
->Smst
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)gSmst
;
// SmmRestoreCpu above is registered as the S3 resume entry point.
771 SmmS3ResumeState
->SmmS3ResumeEntryPoint
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)SmmRestoreCpu
;
773 SmmS3ResumeState
->SmmS3StackSize
= SIZE_32KB
;
774 SmmS3ResumeState
->SmmS3StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN
)SmmS3ResumeState
->SmmS3StackSize
));
// Record a zero stack size if the stack allocation failed.
775 if (SmmS3ResumeState
->SmmS3StackBase
== 0) {
776 SmmS3ResumeState
->SmmS3StackSize
= 0;
779 SmmS3ResumeState
->SmmS3Cr0
= gSmmCr0
;
780 SmmS3ResumeState
->SmmS3Cr3
= Cr3
;
781 SmmS3ResumeState
->SmmS3Cr4
= gSmmCr4
;
// Signature records whether SMM runs in 64-bit or 32-bit mode, chosen by the
// build-time width of UINTN.
783 if (sizeof (UINTN
) == sizeof (UINT64
)) {
784 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_64
;
786 if (sizeof (UINTN
) == sizeof (UINT32
)) {
787 SmmS3ResumeState
->Signature
= SMM_S3_RESUME_SMM_32
;
792 // Patch SmmS3ResumeState->SmmS3Cr3
797 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
798 // protected mode on S3 path
// Max-address allocation capped below 4GB so 32-bit APs can execute it.
800 Address
= BASE_4GB
- 1;
801 Status
= gBS
->AllocatePages (
804 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate
)),
807 ASSERT_EFI_ERROR (Status
);
808 mApHltLoopCode
= (UINT8
*) (UINTN
) Address
;
812 Copy register table from ACPI NVS memory into SMRAM.
814 @param[in] DestinationRegisterTableList Points to destination register table.
815 @param[in] SourceRegisterTableList Points to source register table.
816 @param[in] NumberOfCpus Number of CPUs.
// Deep-copies per-CPU register tables (table headers plus each CPU's entry
// array) and pre-creates MSR spin locks for entries that need bit-field MSR
// writes. NOTE(review): extraction dropped the function name line, locals
// (Index, Index1), and closing braces.
821 IN CPU_REGISTER_TABLE
*DestinationRegisterTableList
,
822 IN CPU_REGISTER_TABLE
*SourceRegisterTableList
,
823 IN UINT32 NumberOfCpus
828 CPU_REGISTER_TABLE_ENTRY
*RegisterTableEntry
;
// Shallow-copy the table headers, then replace each RegisterTableEntry
// pointer with a fresh SMRAM allocation holding a copy of the entries.
830 CopyMem (DestinationRegisterTableList
, SourceRegisterTableList
, NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
831 for (Index
= 0; Index
< NumberOfCpus
; Index
++) {
832 DestinationRegisterTableList
[Index
].RegisterTableEntry
= AllocatePool (DestinationRegisterTableList
[Index
].AllocatedSize
);
833 ASSERT (DestinationRegisterTableList
[Index
].RegisterTableEntry
!= NULL
);
834 CopyMem (DestinationRegisterTableList
[Index
].RegisterTableEntry
, SourceRegisterTableList
[Index
].RegisterTableEntry
, DestinationRegisterTableList
[Index
].AllocatedSize
);
836 // Go though all MSRs in register table to initialize MSR spin lock
838 RegisterTableEntry
= DestinationRegisterTableList
[Index
].RegisterTableEntry
;
839 for (Index1
= 0; Index1
< DestinationRegisterTableList
[Index
].TableLength
; Index1
++, RegisterTableEntry
++) {
// Only MSR entries narrower than 64 bits need read-modify-write, and thus
// a spin lock (see SetProcessorRegister).
840 if ((RegisterTableEntry
->RegisterType
== Msr
) && (RegisterTableEntry
->ValidBitLength
< 64)) {
842 // Initialize MSR spin lock only for those MSRs need bit field writing
844 InitMsrSpinLockByIndex (RegisterTableEntry
->Index
);
// NOTE(review): the doc comment and signature of this routine were lost in
// extraction; from its body it is presumably the routine (GetAcpiCpuData in
// upstream naming — TODO confirm) that copies the ACPI CPU S3 data published
// via PcdCpuS3DataAddress into SMRAM-owned allocations. Locals and several
// closing braces / early returns are also missing.
859 ACPI_CPU_DATA
*AcpiCpuData
;
860 IA32_DESCRIPTOR
*Gdtr
;
861 IA32_DESCRIPTOR
*Idtr
;
863 if (!mAcpiS3Enable
) {
868 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
870 mAcpiCpuData
.NumberOfCpus
= 0;
873 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
875 AcpiCpuData
= (ACPI_CPU_DATA
*)(UINTN
)PcdGet64 (PcdCpuS3DataAddress
);
876 if (AcpiCpuData
== 0) {
881 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
883 CopyMem (&mAcpiCpuData
, AcpiCpuData
, sizeof (mAcpiCpuData
));
// Re-point each embedded table at a fresh SMRAM copy so S3 resume does not
// depend on the (untrusted at that point) ACPI NVS originals.
885 mAcpiCpuData
.MtrrTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (MTRR_SETTINGS
));
886 ASSERT (mAcpiCpuData
.MtrrTable
!= 0);
888 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.MtrrTable
, (VOID
*)(UINTN
)AcpiCpuData
->MtrrTable
, sizeof (MTRR_SETTINGS
));
890 mAcpiCpuData
.GdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
891 ASSERT (mAcpiCpuData
.GdtrProfile
!= 0);
893 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.GdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->GdtrProfile
, sizeof (IA32_DESCRIPTOR
));
895 mAcpiCpuData
.IdtrProfile
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (sizeof (IA32_DESCRIPTOR
));
896 ASSERT (mAcpiCpuData
.IdtrProfile
!= 0);
898 CopyMem ((VOID
*)(UINTN
)mAcpiCpuData
.IdtrProfile
, (VOID
*)(UINTN
)AcpiCpuData
->IdtrProfile
, sizeof (IA32_DESCRIPTOR
));
// Per-CPU register tables are deep-copied via CopyRegisterTable (the call
// line itself was lost; only its arguments remain below).
900 mAcpiCpuData
.PreSmmInitRegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
901 ASSERT (mAcpiCpuData
.PreSmmInitRegisterTable
!= 0);
904 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.PreSmmInitRegisterTable
,
905 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->PreSmmInitRegisterTable
,
906 mAcpiCpuData
.NumberOfCpus
909 mAcpiCpuData
.RegisterTable
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)AllocatePool (mAcpiCpuData
.NumberOfCpus
* sizeof (CPU_REGISTER_TABLE
));
910 ASSERT (mAcpiCpuData
.RegisterTable
!= 0);
913 (CPU_REGISTER_TABLE
*)(UINTN
)mAcpiCpuData
.RegisterTable
,
914 (CPU_REGISTER_TABLE
*)(UINTN
)AcpiCpuData
->RegisterTable
,
915 mAcpiCpuData
.NumberOfCpus
919 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
921 Gdtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.GdtrProfile
;
922 Idtr
= (IA32_DESCRIPTOR
*)(UINTN
)mAcpiCpuData
.IdtrProfile
;
// One allocation holds GDT, IDT and MC handler back-to-back; mIdtForAp and
// mMachineCheckHandlerForAp are offsets into it.
924 mGdtForAp
= AllocatePool ((Gdtr
->Limit
+ 1) + (Idtr
->Limit
+ 1) + mAcpiCpuData
.ApMachineCheckHandlerSize
);
925 ASSERT (mGdtForAp
!= NULL
);
926 mIdtForAp
= (VOID
*) ((UINTN
)mGdtForAp
+ (Gdtr
->Limit
+ 1));
927 mMachineCheckHandlerForAp
= (VOID
*) ((UINTN
)mIdtForAp
+ (Idtr
->Limit
+ 1));
929 CopyMem (mGdtForAp
, (VOID
*)Gdtr
->Base
, Gdtr
->Limit
+ 1);
930 CopyMem (mIdtForAp
, (VOID
*)Idtr
->Base
, Idtr
->Limit
+ 1);
931 CopyMem (mMachineCheckHandlerForAp
, (VOID
*)(UINTN
)mAcpiCpuData
.ApMachineCheckHandlerBase
, mAcpiCpuData
.ApMachineCheckHandlerSize
);
935 Get ACPI S3 enable flag.
// Caches PcdAcpiS3Enable into the module global mAcpiS3Enable, which gates
// the S3 routines above. NOTE(review): VOID lines and braces lost in
// extraction.
939 GetAcpiS3EnableFlag (
943 mAcpiS3Enable
= PcdGetBool (PcdAcpiS3Enable
);