/** @file\r
Code for Processor S3 restoration\r
\r
-Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
-\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
\r
#pragma pack(1)\r
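+//\r
+// Information exchanged between the BSP and the APs while starting the APs\r
+// during S3 resume. The structure is packed so that field offsets match the\r
+// AP startup (rendezvous) assembly code that reads it.\r
+//\r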
typedef struct {\r
- UINTN Lock;\r
- VOID *StackStart;\r
- UINTN StackSize;\r
- VOID *ApFunction;\r
- IA32_DESCRIPTOR GdtrProfile;\r
- IA32_DESCRIPTOR IdtrProfile;\r
- UINT32 BufferStart;\r
- UINT32 Cr3;\r
- UINTN InitializeFloatingPointUnitsAddress;\r
+ UINTN Lock;\r
+ VOID *StackStart;\r
+ UINTN StackSize;\r
+ VOID *ApFunction;\r
+ IA32_DESCRIPTOR GdtrProfile;\r
+ IA32_DESCRIPTOR IdtrProfile;\r
+ UINT32 BufferStart;\r
+ UINT32 Cr3;\r
+ UINTN InitializeFloatingPointUnitsAddress;\r
} MP_CPU_EXCHANGE_INFO;\r
#pragma pack()\r
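+//\r
+// Address map of the AP startup (rendezvous) assembly code, filled in by\r
+// AsmGetAddressMap() and used to patch the mode-switch jump targets after the\r
+// code has been copied to the AP startup vector.\r
+//\r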
\r
typedef struct {\r
- UINT8 *RendezvousFunnelAddress;\r
- UINTN PModeEntryOffset;\r
- UINTN FlatJumpOffset;\r
- UINTN Size;\r
- UINTN LModeEntryOffset;\r
- UINTN LongJumpOffset;\r
+ UINT8 *RendezvousFunnelAddress;\r
+ UINTN PModeEntryOffset;\r
+ UINTN FlatJumpOffset;\r
+ UINTN Size;\r
+ UINTN LModeEntryOffset;\r
+ UINTN LongJumpOffset;\r
} MP_ASSEMBLY_ADDRESS_MAP;\r
\r
//\r
-// Spin lock used to serialize MemoryMapped operation\r
+// Flags used when programming the registers.\r
//\r
-SPIN_LOCK *mMemoryMappedLock = NULL;\r
+typedef struct {\r
+ volatile UINTN MemoryMappedLock; // Spinlock used to program mmio\r
+ volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program\r
+ // core level semaphore.\r
+ volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program\r
+ // package level semaphore.\r
+} PROGRAM_CPU_REGISTER_FLAGS;\r
\r
//\r
// Signal that SMM BASE relocation is complete.\r
//\r
-volatile BOOLEAN mInitApsAfterSmmBaseReloc;\r
+volatile BOOLEAN mInitApsAfterSmmBaseReloc;\r
\r
/**\r
Get starting address and size of the rendezvous entry for APs.\r
VOID *\r
EFIAPI\r
AsmGetAddressMap (\r
- MP_ASSEMBLY_ADDRESS_MAP *AddressMap\r
+ MP_ASSEMBLY_ADDRESS_MAP *AddressMap\r
);\r
\r
-#define LEGACY_REGION_SIZE (2 * 0x1000)\r
-#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)\r
+#define LEGACY_REGION_SIZE (2 * 0x1000)\r
+#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)\r
\r
-ACPI_CPU_DATA mAcpiCpuData;\r
-volatile UINT32 mNumberToFinish;\r
-MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r
-BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r
-MP_MSR_LOCK *mMsrSpinLocks = NULL;\r
-UINTN mMsrSpinLockCount;\r
-UINTN mMsrCount = 0;\r
+PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;\r
+ACPI_CPU_DATA mAcpiCpuData;\r
+volatile UINT32 mNumberToFinish;\r
+MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r
\r
//\r
// S3 boot flag\r
//\r
-BOOLEAN mSmmS3Flag = FALSE;\r
+BOOLEAN mSmmS3Flag = FALSE;\r
\r
//\r
// Pointer to structure used during S3 Resume\r
//\r
-SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
\r
-BOOLEAN mAcpiS3Enable = TRUE;\r
+BOOLEAN mAcpiS3Enable = TRUE;\r
\r
-UINT8 *mApHltLoopCode = NULL;\r
-UINT8 mApHltLoopCodeTemplate[] = {\r
- 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]\r
- 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]\r
- 0xFA, // cli\r
- 0xF4, // hlt\r
- 0xEB, 0xFC // jmp $-2\r
- };\r
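+//\r
+// Machine code executed by each AP after its registers are restored: it\r
+// decrements the 32-bit counter whose address is passed on the stack\r
+// (mNumberToFinish), then disables interrupts and halts in a loop.\r
+//\r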
+UINT8 *mApHltLoopCode = NULL;\r
+UINT8 mApHltLoopCodeTemplate[] = {\r
+ 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]\r
+ 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]\r
+ 0xFA, // cli\r
+ 0xF4, // hlt\r
+ 0xEB, 0xFC // jmp $-2\r
+};\r
\r
/**\r
- Get MSR spin lock by MSR index.\r
-\r
- @param MsrIndex MSR index value.\r
-\r
- @return Pointer to MSR spin lock.\r
+ Sync up the MTRR values for all processors.\r
\r
+ @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
**/\r
-SPIN_LOCK *\r
-GetMsrSpinLockByIndex (\r
- IN UINT32 MsrIndex\r
+VOID\r
+EFIAPI\r
+LoadMtrrData (\r
+ EFI_PHYSICAL_ADDRESS MtrrTable\r
)\r
+\r
+/*++\r
+\r
+Routine Description:\r
+\r
+ Sync up the MTRR values for all processors.\r
+\r
+Arguments:\r
+\r
+Returns:\r
+ None\r
+\r
+--*/\r
{\r
- UINTN Index;\r
- for (Index = 0; Index < mMsrCount; Index++) {\r
- if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {\r
- return mMsrSpinLocks[Index].SpinLock;\r
- }\r
- }\r
- return NULL;\r
+ MTRR_SETTINGS *MtrrSettings;\r
+\r
+ MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;\r
+ MtrrSetAllMtrrs (MtrrSettings);\r
}\r
\r
/**\r
- Initialize MSR spin lock by MSR index.\r
+ Increment semaphore by 1.\r
\r
- @param MsrIndex MSR index value.\r
+  @param Sem  Pointer to the 32-bit semaphore count to increment.\r
\r
**/\r
VOID\r
-InitMsrSpinLockByIndex (\r
- IN UINT32 MsrIndex\r
+S3ReleaseSemaphore (\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
- UINTN MsrSpinLockCount;\r
- UINTN NewMsrSpinLockCount;\r
- UINTN Index;\r
- UINTN AddedSize;\r
-\r
- if (mMsrSpinLocks == NULL) {\r
- MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;\r
- mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);\r
- ASSERT (mMsrSpinLocks != NULL);\r
- for (Index = 0; Index < MsrSpinLockCount; Index++) {\r
- mMsrSpinLocks[Index].SpinLock =\r
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);\r
- mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;\r
- }\r
- mMsrSpinLockCount = MsrSpinLockCount;\r
- mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;\r
- }\r
- if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {\r
- //\r
- // Initialize spin lock for MSR programming\r
- //\r
- mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;\r
- InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);\r
- mMsrCount ++;\r
- if (mMsrCount == mMsrSpinLockCount) {\r
- //\r
- // If MSR spin lock buffer is full, enlarge it\r
- //\r
- AddedSize = SIZE_4KB;\r
- mSmmCpuSemaphores.SemaphoreMsr.Msr =\r
- AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));\r
- ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);\r
- NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;\r
- mMsrSpinLocks = ReallocatePool (\r
- sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,\r
- sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,\r
- mMsrSpinLocks\r
- );\r
- ASSERT (mMsrSpinLocks != NULL);\r
- mMsrSpinLockCount = NewMsrSpinLockCount;\r
- for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {\r
- mMsrSpinLocks[Index].SpinLock =\r
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +\r
- (Index - mMsrCount) * mSemaphoreSize);\r
- mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;\r
- }\r
- }\r
- }\r
+ InterlockedIncrement (Sem);\r
}\r
\r
/**\r
- Sync up the MTRR values for all processors.\r
+  Wait until the semaphore is non-zero, then decrement it by 1.\r
+\r
+  The decrement is performed with an atomic compare-exchange so that it is\r
+  MP safe.\r
+\r
+  @param Sem  Pointer to the 32-bit semaphore count to decrement.\r
\r
- @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
**/\r
VOID\r
-EFIAPI\r
-LoadMtrrData (\r
- EFI_PHYSICAL_ADDRESS MtrrTable\r
+S3WaitForSemaphore (\r
+ IN OUT volatile UINT32 *Sem\r
)\r
-/*++\r
+{\r
+ UINT32 Value;\r
+\r
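+  //\r
+  // Spin until the count is non-zero, then claim one unit with an atomic\r
+  // compare-exchange; retry if another thread changed the count concurrently.\r
+  //\r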
+ do {\r
+ Value = *Sem;\r
+ } while (Value == 0 ||\r
+ InterlockedCompareExchange32 (\r
+ Sem,\r
+ Value,\r
+ Value - 1\r
+ ) != Value);\r
+}\r
\r
-Routine Description:\r
+/**\r
+ Read / write CR value.\r
\r
- Sync up the MTRR values for all processors.\r
+  @param[in]      CrIndex    The index of the control register to read or write.\r
+  @param[in]      Read       TRUE to read the CR value; FALSE to write it.\r
+  @param[in,out]  CrValue    Pointer to the CR value to read into or write from.\r
\r
-Arguments:\r
+  @retval EFI_SUCCESS      The control register was read or written successfully.\r
+  @retval EFI_UNSUPPORTED  The specified CrIndex is not supported.\r
+**/\r
+EFI_STATUS\r
+ReadWriteCr (\r
+ IN UINT32 CrIndex,\r
+ IN BOOLEAN Read,\r
+ IN OUT UINTN *CrValue\r
+ )\r
+{\r
+ switch (CrIndex) {\r
+ case 0:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr0 ();\r
+ } else {\r
+ AsmWriteCr0 (*CrValue);\r
+ }\r
\r
-Returns:\r
- None\r
+ break;\r
+ case 2:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr2 ();\r
+ } else {\r
+ AsmWriteCr2 (*CrValue);\r
+ }\r
\r
---*/\r
-{\r
- MTRR_SETTINGS *MtrrSettings;\r
+ break;\r
+ case 3:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr3 ();\r
+ } else {\r
+ AsmWriteCr3 (*CrValue);\r
+ }\r
\r
- MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
- MtrrSetAllMtrrs (MtrrSettings);\r
+ break;\r
+ case 4:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr4 ();\r
+ } else {\r
+ AsmWriteCr4 (*CrValue);\r
+ }\r
+\r
+ break;\r
+ default:\r
+ return EFI_UNSUPPORTED;\r
+ }\r
+\r
+ return EFI_SUCCESS;\r
}\r
\r
/**\r
- Programs registers for the calling processor.\r
-\r
- This function programs registers for the calling processor.\r
+ Initialize the CPU registers from a register table.\r
\r
- @param RegisterTables Pointer to register table of the running processor.\r
- @param RegisterTableCount Register table count.\r
+  @param[in]  RegisterTable         The register table for this AP.\r
+  @param[in]  ApLocation            AP location info for this AP.\r
+  @param[in]  CpuStatus             CPU status info for this CPU.\r
+  @param[in]  CpuFlags              Flags data structure used when programming the registers.\r
\r
+ @note This service could be called by BSP/APs.\r
**/\r
VOID\r
-SetProcessorRegister (\r
- IN CPU_REGISTER_TABLE *RegisterTables,\r
- IN UINTN RegisterTableCount\r
+ProgramProcessorRegister (\r
+ IN CPU_REGISTER_TABLE *RegisterTable,\r
+ IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,\r
+ IN CPU_STATUS_INFORMATION *CpuStatus,\r
+ IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags\r
)\r
{\r
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
UINTN Index;\r
UINTN Value;\r
- SPIN_LOCK *MsrSpinLock;\r
- UINT32 InitApicId;\r
- CPU_REGISTER_TABLE *RegisterTable;\r
-\r
- InitApicId = GetInitialApicId ();\r
- RegisterTable = NULL;\r
- for (Index = 0; Index < RegisterTableCount; Index++) {\r
- if (RegisterTables[Index].InitialApicId == InitApicId) {\r
- RegisterTable = &RegisterTables[Index];\r
- break;\r
- }\r
- }\r
- ASSERT (RegisterTable != NULL);\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;\r
+ volatile UINT32 *SemaphorePtr;\r
+ UINT32 FirstThread;\r
+ UINT32 CurrentThread;\r
+ UINT32 CurrentCore;\r
+ UINTN ProcessorIndex;\r
+ UINT32 *ThreadCountPerPackage;\r
+ UINT8 *ThreadCountPerCore;\r
+ EFI_STATUS Status;\r
+ UINT64 CurrentValue;\r
\r
//\r
// Traverse Register Table of this logical processor\r
//\r
- RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;\r
- for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {\r
+ RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;\r
+\r
+ for (Index = 0; Index < RegisterTable->TableLength; Index++) {\r
+ RegisterTableEntry = &RegisterTableEntryHead[Index];\r
+\r
//\r
// Check the type of specified register\r
//\r
switch (RegisterTableEntry->RegisterType) {\r
- //\r
- // The specified register is Control Register\r
- //\r
- case ControlRegister:\r
- switch (RegisterTableEntry->Index) {\r
- case 0:\r
- Value = AsmReadCr0 ();\r
- Value = (UINTN) BitFieldWrite64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINTN) RegisterTableEntry->Value\r
- );\r
- AsmWriteCr0 (Value);\r
- break;\r
- case 2:\r
- Value = AsmReadCr2 ();\r
- Value = (UINTN) BitFieldWrite64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINTN) RegisterTableEntry->Value\r
- );\r
- AsmWriteCr2 (Value);\r
- break;\r
- case 3:\r
- Value = AsmReadCr3 ();\r
- Value = (UINTN) BitFieldWrite64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINTN) RegisterTableEntry->Value\r
- );\r
- AsmWriteCr3 (Value);\r
+ //\r
+ // The specified register is Control Register\r
+ //\r
+ case ControlRegister:\r
+ Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);\r
+ if (EFI_ERROR (Status)) {\r
+ break;\r
+ }\r
+\r
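+      //\r
+      // When TestThenWrite is set, skip the write if the selected bit field\r
+      // already holds the requested value.\r
+      //\r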
+ if (RegisterTableEntry->TestThenWrite) {\r
+ CurrentValue = BitFieldRead64 (\r
+ Value,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1\r
+ );\r
+ if (CurrentValue == RegisterTableEntry->Value) {\r
+ break;\r
+ }\r
+ }\r
+\r
+ Value = (UINTN)BitFieldWrite64 (\r
+ Value,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ RegisterTableEntry->Value\r
+ );\r
+ ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);\r
break;\r
- case 4:\r
- Value = AsmReadCr4 ();\r
- Value = (UINTN) BitFieldWrite64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINTN) RegisterTableEntry->Value\r
- );\r
- AsmWriteCr4 (Value);\r
+ //\r
+ // The specified register is Model Specific Register\r
+ //\r
+ case Msr:\r
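+      //\r
+      // Likewise, when TestThenWrite is set, skip the MSR write if the MSR (or\r
+      // the selected bit field) already holds the requested value.\r
+      //\r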
+ if (RegisterTableEntry->TestThenWrite) {\r
+ Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);\r
+ if (RegisterTableEntry->ValidBitLength >= 64) {\r
+ if (Value == RegisterTableEntry->Value) {\r
+ break;\r
+ }\r
+ } else {\r
+ CurrentValue = BitFieldRead64 (\r
+ Value,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1\r
+ );\r
+ if (CurrentValue == RegisterTableEntry->Value) {\r
+ break;\r
+ }\r
+ }\r
+ }\r
+\r
+ //\r
+ // If this function is called to restore register setting after INIT signal,\r
+ // there is no need to restore MSRs in register table.\r
+ //\r
+ if (RegisterTableEntry->ValidBitLength >= 64) {\r
+ //\r
+ // If length is not less than 64 bits, then directly write without reading\r
+ //\r
+ AsmWriteMsr64 (\r
+ RegisterTableEntry->Index,\r
+ RegisterTableEntry->Value\r
+ );\r
+ } else {\r
+ //\r
+ // Set the bit section according to bit start and length\r
+ //\r
+ AsmMsrBitFieldWrite64 (\r
+ RegisterTableEntry->Index,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ RegisterTableEntry->Value\r
+ );\r
+ }\r
+\r
break;\r
- default:\r
+ //\r
+ // MemoryMapped operations\r
+ //\r
+ case MemoryMapped:\r
+ AcquireSpinLock (&CpuFlags->MemoryMappedLock);\r
+ MmioBitFieldWrite32 (\r
+ (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ (UINT32)RegisterTableEntry->Value\r
+ );\r
+ ReleaseSpinLock (&CpuFlags->MemoryMappedLock);\r
break;\r
- }\r
- break;\r
- //\r
- // The specified register is Model Specific Register\r
- //\r
- case Msr:\r
//\r
- // If this function is called to restore register setting after INIT signal,\r
- // there is no need to restore MSRs in register table.\r
+ // Enable or disable cache\r
//\r
- if (RegisterTableEntry->ValidBitLength >= 64) {\r
+ case CacheControl:\r
//\r
- // If length is not less than 64 bits, then directly write without reading\r
+ // If value of the entry is 0, then disable cache. Otherwise, enable cache.\r
//\r
- AsmWriteMsr64 (\r
- RegisterTableEntry->Index,\r
- RegisterTableEntry->Value\r
- );\r
- } else {\r
+ if (RegisterTableEntry->Value == 0) {\r
+ AsmDisableCache ();\r
+ } else {\r
+ AsmEnableCache ();\r
+ }\r
+\r
+ break;\r
+\r
+ case Semaphore:\r
+      //\r
+      // The semaphore logic works as below:\r
//\r
- // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode\r
- // to make sure MSR read/write operation is atomic.\r
+      //  V(x) = S3ReleaseSemaphore (Semaphore[FirstThread + x]);\r
+      //  P(x) = S3WaitForSemaphore (Semaphore[FirstThread + x]);\r
//\r
- MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);\r
- AcquireSpinLock (MsrSpinLock);\r
+      // All threads (T0...Tn) wait at the P() line and then continue running\r
+      // together.\r
//\r
- // Set the bit section according to bit start and length\r
//\r
- AsmMsrBitFieldWrite64 (\r
- RegisterTableEntry->Index,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- RegisterTableEntry->Value\r
+ // T0 T1 ... Tn\r
+ //\r
+ // V(0...n) V(0...n) ... V(0...n)\r
+ // n * P(0) n * P(1) ... n * P(n)\r
+ //\r
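+      // For example, with two threads T0 and T1 in scope, each thread performs\r
+      // V(0) and V(1) and then 2 * P on its own slot, so neither thread\r
+      // proceeds until both have signaled that they reached this entry.\r
+      //\r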
+ ASSERT (\r
+ (ApLocation != NULL) &&\r
+ (CpuStatus->ThreadCountPerPackage != 0) &&\r
+ (CpuStatus->ThreadCountPerCore != 0) &&\r
+ (CpuFlags->CoreSemaphoreCount != NULL) &&\r
+ (CpuFlags->PackageSemaphoreCount != NULL)\r
);\r
- ReleaseSpinLock (MsrSpinLock);\r
- }\r
- break;\r
- //\r
- // MemoryMapped operations\r
- //\r
- case MemoryMapped:\r
- AcquireSpinLock (mMemoryMappedLock);\r
- MmioBitFieldWrite32 (\r
- (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINT32)RegisterTableEntry->Value\r
- );\r
- ReleaseSpinLock (mMemoryMappedLock);\r
- break;\r
- //\r
- // Enable or disable cache\r
- //\r
- case CacheControl:\r
- //\r
- // If value of the entry is 0, then disable cache. Otherwise, enable cache.\r
- //\r
- if (RegisterTableEntry->Value == 0) {\r
- AsmDisableCache ();\r
- } else {\r
- AsmEnableCache ();\r
- }\r
- break;\r
+ switch (RegisterTableEntry->Value) {\r
+ case CoreDepType:\r
+ SemaphorePtr = CpuFlags->CoreSemaphoreCount;\r
+ ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;\r
+\r
+ CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;\r
+ //\r
+          // Get the offset of the first thread in the core to which the current thread belongs.\r
+ //\r
+ FirstThread = CurrentCore * CpuStatus->MaxThreadCount;\r
+ CurrentThread = FirstThread + ApLocation->Thread;\r
+\r
+ //\r
+          // Different cores may contain different numbers of valid threads. If the driver tracked\r
+          // the exact thread indexes per core, the logic would become much more complicated, so it\r
+          // simply records the maximum thread count across all cores and uses it as the expected\r
+          // thread count for every core.\r
+          // In the two steps below, the current thread first releases the semaphore for every\r
+          // thread slot in the current core; some slots may not correspond to valid threads, which\r
+          // is harmless. Second, the current thread waits on the semaphore once per valid thread in\r
+          // the current core, because only valid threads release the semaphore for this thread, so\r
+          // only the valid thread count needs to be waited for.\r
+ //\r
+\r
+ //\r
+ // First Notify ALL THREADs in current Core that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+\r
+ //\r
+ // Second, check whether all VALID THREADs (not all threads) in current core are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
+\r
+ break;\r
+\r
+ case PackageDepType:\r
+ SemaphorePtr = CpuFlags->PackageSemaphoreCount;\r
+ ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;\r
+ //\r
+          // Get the offset of the first thread in the package to which the current thread belongs.\r
+ //\r
+ FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;\r
+ //\r
+          // Get the semaphore index of the current thread in the current package.\r
+ //\r
+ CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;\r
+\r
+ //\r
+          // Different packages may contain different numbers of valid threads. If the driver tracked\r
+          // the exact thread indexes per package, the logic would become much more complicated, so it\r
+          // simply records the maximum thread count across all packages and uses it as the expected\r
+          // thread count for every package.\r
+          // In the two steps below, the current thread first releases the semaphore for every\r
+          // thread slot in the current package; some slots may not correspond to valid threads, which\r
+          // is harmless. Second, the current thread waits on the semaphore once per valid thread in\r
+          // the current package, because only valid threads release the semaphore for this thread, so\r
+          // only the valid thread count needs to be waited for.\r
+ //\r
+\r
+ //\r
+ // First Notify ALL THREADS in current package that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+\r
+ //\r
+ // Second, check whether VALID THREADS (not all threads) in current package are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
+\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
\r
- default:\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+\r
+  Set processor registers for the calling processor from its register table.\r
+\r
+  @param PreSmmRegisterTable     TRUE to use the pre-SMM-init register table;\r
+                                 FALSE to use the register table.\r
+\r
+**/\r
+VOID\r
+SetRegister (\r
+ IN BOOLEAN PreSmmRegisterTable\r
+ )\r
+{\r
+ CPU_FEATURE_INIT_DATA *FeatureInitData;\r
+ CPU_REGISTER_TABLE *RegisterTable;\r
+ CPU_REGISTER_TABLE *RegisterTables;\r
+ UINT32 InitApicId;\r
+ UINTN ProcIndex;\r
+ UINTN Index;\r
+\r
+ FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;\r
+\r
+ if (PreSmmRegisterTable) {\r
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;\r
+ } else {\r
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;\r
+ }\r
+\r
+ if (RegisterTables == NULL) {\r
+ return;\r
+ }\r
+\r
+ InitApicId = GetInitialApicId ();\r
+ RegisterTable = NULL;\r
+ ProcIndex = (UINTN)-1;\r
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
+ if (RegisterTables[Index].InitialApicId == InitApicId) {\r
+ RegisterTable = &RegisterTables[Index];\r
+ ProcIndex = Index;\r
break;\r
}\r
}\r
+\r
+ ASSERT (RegisterTable != NULL);\r
+\r
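+  //\r
+  // Pass the AP location only when it is available; register table entries of\r
+  // type Semaphore require it (see the ASSERT in ProgramProcessorRegister()).\r
+  //\r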
+ if (FeatureInitData->ApLocation != 0) {\r
+ ProgramProcessorRegister (\r
+ RegisterTable,\r
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,\r
+ &FeatureInitData->CpuStatus,\r
+ &mCpuFlags\r
+ );\r
+ } else {\r
+ ProgramProcessorRegister (\r
+ RegisterTable,\r
+ NULL,\r
+ &FeatureInitData->CpuStatus,\r
+ &mCpuFlags\r
+ );\r
+ }\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- UINTN TopOfStack;\r
- UINT8 Stack[128];\r
+ UINTN TopOfStack;\r
+ UINT8 Stack[128];\r
\r
LoadMtrrData (mAcpiCpuData.MtrrTable);\r
\r
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);\r
+ SetRegister (TRUE);\r
\r
//\r
// Count down the number with lock mechanism.\r
ProgramVirtualWireMode ();\r
DisableLvtInterrupts ();\r
\r
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);\r
+ SetRegister (FALSE);\r
\r
//\r
// Place AP into the safe code, count down the number with lock mechanism in the safe code.\r
//\r
- TopOfStack = (UINTN) Stack + sizeof (Stack);\r
- TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
- CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
+ TopOfStack = (UINTN)Stack + sizeof (Stack);\r
+ TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);\r
+ CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
}\r
\r
EFI_PHYSICAL_ADDRESS WorkingBuffer\r
)\r
{\r
- EFI_PHYSICAL_ADDRESS StartupVector;\r
- MP_ASSEMBLY_ADDRESS_MAP AddressMap;\r
+ EFI_PHYSICAL_ADDRESS StartupVector;\r
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;\r
\r
//\r
// Get the address map of startup code for AP,\r
// Copy AP startup code to startup vector, and then redirect the long jump\r
// instructions for mode switching.\r
//\r
- CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);\r
- *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);\r
+ CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);\r
+ *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);\r
if (AddressMap.LongJumpOffset != 0) {\r
- *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);\r
+ *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);\r
}\r
\r
//\r
// Get the start address of exchange data between BSP and AP.\r
//\r
- mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);\r
- ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));\r
+ mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);\r
+ ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));\r
\r
- CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
- CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+ CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+ CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
\r
- mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;\r
- mExchangeInfo->StackSize = mAcpiCpuData.StackSize;\r
- mExchangeInfo->BufferStart = (UINT32) StartupVector;\r
- mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());\r
+ mExchangeInfo->StackStart = (VOID *)(UINTN)mAcpiCpuData.StackAddress;\r
+ mExchangeInfo->StackSize = mAcpiCpuData.StackSize;\r
+ mExchangeInfo->BufferStart = (UINT32)StartupVector;\r
+ mExchangeInfo->Cr3 = (UINT32)(AsmReadCr3 ());\r
mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;\r
}\r
\r
{\r
LoadMtrrData (mAcpiCpuData.MtrrTable);\r
\r
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);\r
+ SetRegister (TRUE);\r
\r
ProgramVirtualWireMode ();\r
\r
PrepareApStartupVector (mAcpiCpuData.StartupVector);\r
\r
- mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r
- mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;\r
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
+ ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);\r
+ } else {\r
+ ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);\r
+ }\r
+\r
+ mNumberToFinish = (UINT32)(mNumberOfCpus - 1);\r
+ mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;\r
\r
//\r
// Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.\r
VOID\r
)\r
{\r
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);\r
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
+ ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);\r
+ } else {\r
+ ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);\r
+ }\r
\r
- mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r
+ mNumberToFinish = (UINT32)(mNumberOfCpus - 1);\r
\r
//\r
- // Signal that SMM base relocation is complete and to continue initialization.\r
+  // Signal that SMM base relocation is complete so that all APs can continue their initialization.\r
//\r
mInitApsAfterSmmBaseReloc = TRUE;\r
\r
+ //\r
+  // Register programming must not start until all APs have continued their\r
+  // initialization. This is required to support the semaphore mechanism in the\r
+  // register table: if a semaphore's dependency type is package scope, the\r
+  // semaphore waits for all APs in one package to finish their tasks before\r
+  // the next register is programmed on any AP. If the APs have not started\r
+  // their tasks while the BSP is performing its own, the BSP thread will hang\r
+  // waiting for the other APs in the same package to finish.\r
+ //\r
+ SetRegister (FALSE);\r
+\r
while (mNumberToFinish > 0) {\r
CpuPause ();\r
}\r
VOID\r
)\r
{\r
- SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
- IA32_DESCRIPTOR Ia32Idtr;\r
- IA32_DESCRIPTOR X64Idtr;\r
- IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
- EFI_STATUS Status;\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ IA32_DESCRIPTOR Ia32Idtr;\r
+ IA32_DESCRIPTOR X64Idtr;\r
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
+ EFI_STATUS Status;\r
\r
- DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
+ DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));\r
\r
mSmmS3Flag = TRUE;\r
\r
- InitializeSpinLock (mMemoryMappedLock);\r
-\r
//\r
// See if there is enough context to resume PEI Phase\r
//\r
if (mSmmS3ResumeState == NULL) {\r
- DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));\r
CpuDeadLoop ();\r
}\r
\r
//\r
// Save the IA32 IDT Descriptor\r
//\r
- AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r
\r
//\r
// Setup X64 IDT table\r
//\r
ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
- X64Idtr.Base = (UINTN) IdtEntryTable;\r
- X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
- AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
+ X64Idtr.Base = (UINTN)IdtEntryTable;\r
+ X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);\r
\r
//\r
// Setup the default exception handler\r
//\r
mRestoreSmmConfigurationInS3 = TRUE;\r
\r
- DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
- DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
- DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
- DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
- DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
\r
//\r
// If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
//\r
if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
- DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
+ DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
\r
SwitchStack (\r
(SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
// If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
//\r
if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
- DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
+ DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
//\r
// Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r
//\r
//\r
// Restore IA32 IDT table\r
//\r
- AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r
AsmDisablePaging64 (\r
SmmS3ResumeState->ReturnCs,\r
(UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
//\r
// Can not resume PEI Phase\r
//\r
- DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));\r
CpuDeadLoop ();\r
}\r
\r
IN UINT32 Cr3\r
)\r
{\r
- VOID *GuidHob;\r
- EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
- SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
- EFI_PHYSICAL_ADDRESS Address;\r
- EFI_STATUS Status;\r
+ VOID *GuidHob;\r
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+ EFI_STATUS Status;\r
\r
if (!mAcpiS3Enable) {\r
return;\r
"ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",\r
__FUNCTION__,\r
&gEfiAcpiVariableGuid\r
- ));\r
+ ));\r
CpuDeadLoop ();\r
} else {\r
- SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);\r
\r
- DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
- DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
\r
SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
\r
- mSmmS3ResumeState = SmmS3ResumeState;\r
+ mSmmS3ResumeState = SmmS3ResumeState;\r
SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
\r
SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
if (sizeof (UINTN) == sizeof (UINT64)) {\r
SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
}\r
+\r
if (sizeof (UINTN) == sizeof (UINT32)) {\r
SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
}\r
- }\r
\r
- //\r
- // Patch SmmS3ResumeState->SmmS3Cr3\r
- //\r
- InitSmmS3Cr3 ();\r
+ //\r
+ // Patch SmmS3ResumeState->SmmS3Cr3\r
+ //\r
+ InitSmmS3Cr3 ();\r
+ }\r
\r
//\r
// Allocate safe memory in ACPI NVS for AP to execute hlt loop in\r
&Address\r
);\r
ASSERT_EFI_ERROR (Status);\r
- mApHltLoopCode = (UINT8 *) (UINTN) Address;\r
+ mApHltLoopCode = (UINT8 *)(UINTN)Address;\r
}\r
\r
/**\r
- Copy register table from ACPI NVS memory into SMRAM.\r
+ Copy register table from non-SMRAM into SMRAM.\r
\r
@param[in] DestinationRegisterTableList Points to destination register table.\r
@param[in] SourceRegisterTableList Points to source register table.\r
**/\r
VOID\r
CopyRegisterTable (\r
- IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
- IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
- IN UINT32 NumberOfCpus\r
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
+ IN UINT32 NumberOfCpus\r
)\r
{\r
- UINTN Index;\r
- UINTN Index1;\r
- CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
+ UINTN Index;\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
\r
CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
for (Index = 0; Index < NumberOfCpus; Index++) {\r
- if (DestinationRegisterTableList[Index].AllocatedSize != 0) {\r
- RegisterTableEntry = AllocateCopyPool (\r
- DestinationRegisterTableList[Index].AllocatedSize,\r
- (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
- );\r
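+    //\r
+    // Only copy entries for CPUs whose register table is not empty, and size\r
+    // the SMRAM copy to hold exactly TableLength entries.\r
+    //\r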
+ if (DestinationRegisterTableList[Index].TableLength != 0) {\r
+ DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);\r
+ RegisterTableEntry = AllocateCopyPool (\r
+ DestinationRegisterTableList[Index].AllocatedSize,\r
+ (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
+ );\r
ASSERT (RegisterTableEntry != NULL);\r
DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
- //\r
- // Go though all MSRs in register table to initialize MSR spin lock\r
- //\r
- for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
- if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
- //\r
- // Initialize MSR spin lock only for those MSRs need bit field writing\r
- //\r
- InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
- }\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Check whether the register table is empty or not.\r
+\r
+ @param[in] RegisterTable Point to the register table.\r
+ @param[in] NumberOfCpus Number of CPUs.\r
+\r
+ @retval TRUE The register table is empty.\r
+ @retval FALSE The register table is not empty.\r
+**/\r
+BOOLEAN\r
+IsRegisterTableEmpty (\r
+ IN CPU_REGISTER_TABLE *RegisterTable,\r
+ IN UINT32 NumberOfCpus\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ if (RegisterTable != NULL) {\r
+ for (Index = 0; Index < NumberOfCpus; Index++) {\r
+ if (RegisterTable[Index].TableLength != 0) {\r
+ return FALSE;\r
}\r
}\r
}\r
+\r
+ return TRUE;\r
+}\r
+\r
+/**\r
+ Copy the data used to initialize processor register into SMRAM.\r
+\r
+ @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.\r
+ @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.\r
+\r
+**/\r
+VOID\r
+CopyCpuFeatureInitDatatoSmram (\r
+ IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,\r
+ IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc\r
+ )\r
+{\r
+ CPU_STATUS_INFORMATION *CpuStatus;\r
+\r
+ if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {\r
+ CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+ }\r
+\r
+ if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {\r
+ CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+ }\r
+\r
+ CpuStatus = &CpuFeatureInitDataDst->CpuStatus;\r
+ CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));\r
+\r
+ if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {\r
+ CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
+ sizeof (UINT32) * CpuStatus->PackageCount,\r
+ (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage\r
+ );\r
+ ASSERT (CpuStatus->ThreadCountPerPackage != 0);\r
+ }\r
+\r
+ if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {\r
+ CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
+ sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),\r
+ (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore\r
+ );\r
+ ASSERT (CpuStatus->ThreadCountPerCore != 0);\r
+ }\r
+\r
+ if (CpuFeatureInitDataSrc->ApLocation != 0) {\r
+ CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
+ mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation\r
+ );\r
+ ASSERT (CpuFeatureInitDataDst->ApLocation != 0);\r
+ }\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- ACPI_CPU_DATA *AcpiCpuData;\r
- IA32_DESCRIPTOR *Gdtr;\r
- IA32_DESCRIPTOR *Idtr;\r
- VOID *GdtForAp;\r
- VOID *IdtForAp;\r
- VOID *MachineCheckHandlerForAp;\r
+ ACPI_CPU_DATA *AcpiCpuData;\r
+ IA32_DESCRIPTOR *Gdtr;\r
+ IA32_DESCRIPTOR *Idtr;\r
+ VOID *GdtForAp;\r
+ VOID *IdtForAp;\r
+ VOID *MachineCheckHandlerForAp;\r
+ CPU_STATUS_INFORMATION *CpuStatus;\r
\r
if (!mAcpiS3Enable) {\r
return;\r
\r
CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
\r
- mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
- ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
-\r
- CopyRegisterTable (\r
- (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
- (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
- mAcpiCpuData.NumberOfCpus\r
- );\r
-\r
- mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
- ASSERT (mAcpiCpuData.RegisterTable != 0);\r
-\r
- CopyRegisterTable (\r
- (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
- (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
- mAcpiCpuData.NumberOfCpus\r
- );\r
-\r
//\r
// Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
//\r
Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
\r
- GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
+ GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
ASSERT (GdtForAp != NULL);\r
- IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
- MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));\r
+ IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
+ MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));\r
\r
CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
\r
- Gdtr->Base = (UINTN)GdtForAp;\r
- Idtr->Base = (UINTN)IdtForAp;\r
+ Gdtr->Base = (UINTN)GdtForAp;\r
+ Idtr->Base = (UINTN)IdtForAp;\r
mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;\r
+\r
+ ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));\r
+\r
+ if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {\r
+ //\r
+ // If the CPU features will not be initialized by CpuFeaturesPei module during\r
+ // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,\r
+ // which will be consumed in SmmRestoreCpu during next S3 resume.\r
+ //\r
+ CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);\r
+\r
+ CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;\r
+\r
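+    //\r
+    // Allocate one 32-bit counter per possible logical processor\r
+    // (PackageCount * MaxCoreCount * MaxThreadCount) for both the core-level\r
+    // and the package-level semaphores used by ProgramProcessorRegister().\r
+    //\r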
+ mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (\r
+ sizeof (UINT32) * CpuStatus->PackageCount *\r
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount\r
+ );\r
+ ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);\r
+\r
+ mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (\r
+ sizeof (UINT32) * CpuStatus->PackageCount *\r
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount\r
+ );\r
+ ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);\r
+\r
+ InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);\r
+ }\r
}\r
\r
/**\r