/** @file\r
Code for Processor S3 restoration\r
\r
-Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2006 - 2023, Intel Corporation. All rights reserved.<BR>\r
SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
#pragma pack(1)\r
typedef struct {\r
- UINTN Lock;\r
- VOID *StackStart;\r
- UINTN StackSize;\r
- VOID *ApFunction;\r
- IA32_DESCRIPTOR GdtrProfile;\r
- IA32_DESCRIPTOR IdtrProfile;\r
- UINT32 BufferStart;\r
- UINT32 Cr3;\r
- UINTN InitializeFloatingPointUnitsAddress;\r
+ UINTN Lock;\r
+ VOID *StackStart;\r
+ UINTN StackSize;\r
+ VOID *ApFunction;\r
+ IA32_DESCRIPTOR GdtrProfile;\r
+ IA32_DESCRIPTOR IdtrProfile;\r
+ UINT32 BufferStart;\r
+ UINT32 Cr3;\r
+ UINTN InitializeFloatingPointUnitsAddress;\r
} MP_CPU_EXCHANGE_INFO;\r
#pragma pack()\r
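\r
//\r
// Illustrative note (not part of the original patch): MP_CPU_EXCHANGE_INFO is read at\r
// fixed byte offsets by the AP startup assembly stub, which is why it is declared\r
// inside #pragma pack(1). A hypothetical compile-time guard using the OFFSET_OF and\r
// STATIC_ASSERT macros from MdePkg's Base.h could pin the layout the assembly assumes:\r
//\r
//   STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, Lock) == 0, "AP stub expects Lock at offset 0");\r
//\r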
\r
typedef struct {\r
- UINT8 *RendezvousFunnelAddress;\r
- UINTN PModeEntryOffset;\r
- UINTN FlatJumpOffset;\r
- UINTN Size;\r
- UINTN LModeEntryOffset;\r
- UINTN LongJumpOffset;\r
+ UINT8 *RendezvousFunnelAddress;\r
+ UINTN PModeEntryOffset;\r
+ UINTN FlatJumpOffset;\r
+ UINTN Size;\r
+ UINTN LModeEntryOffset;\r
+ UINTN LongJumpOffset;\r
} MP_ASSEMBLY_ADDRESS_MAP;\r
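\r
//\r
// Illustrative note (not part of the original patch): these offsets are produced by\r
// AsmGetAddressMap () below and consumed in PrepareApStartupVector (), which copies\r
// RendezvousFunnelAddress..RendezvousFunnelAddress+Size to the startup vector and\r
// patches the flat/long jump targets for mode switching.\r
//\r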
\r
//\r
// Flags used when programming the registers.\r
//\r
typedef struct {\r
- volatile UINTN MemoryMappedLock; // Spinlock used to program mmio\r
- volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program\r
+ volatile UINTN MemoryMappedLock; // Spinlock used to program mmio\r
+ volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program\r
// core level semaphore.\r
- volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program\r
+ volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program\r
// package level semaphore.\r
} PROGRAM_CPU_REGISTER_FLAGS;\r
\r
//\r
// Signal that SMM BASE relocation is complete.\r
//\r
-volatile BOOLEAN mInitApsAfterSmmBaseReloc;\r
+volatile BOOLEAN mInitApsAfterSmmBaseReloc;\r
\r
/**\r
Get starting address and size of the rendezvous entry for APs.\r
VOID *\r
EFIAPI\r
AsmGetAddressMap (\r
- MP_ASSEMBLY_ADDRESS_MAP *AddressMap\r
+ MP_ASSEMBLY_ADDRESS_MAP *AddressMap\r
);\r
\r
-#define LEGACY_REGION_SIZE (2 * 0x1000)\r
-#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)\r
+#define LEGACY_REGION_SIZE (2 * 0x1000)\r
+#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)\r
\r
-PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;\r
-ACPI_CPU_DATA mAcpiCpuData;\r
-volatile UINT32 mNumberToFinish;\r
-MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r
-BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r
+PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;\r
+ACPI_CPU_DATA mAcpiCpuData;\r
+volatile UINT32 mNumberToFinish;\r
+MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r
\r
//\r
// S3 boot flag\r
//\r
-BOOLEAN mSmmS3Flag = FALSE;\r
+BOOLEAN mSmmS3Flag = FALSE;\r
\r
//\r
// Pointer to structure used during S3 Resume\r
//\r
-SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
\r
-BOOLEAN mAcpiS3Enable = TRUE;\r
+BOOLEAN mAcpiS3Enable = TRUE;\r
\r
-UINT8 *mApHltLoopCode = NULL;\r
-UINT8 mApHltLoopCodeTemplate[] = {\r
- 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]\r
- 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]\r
- 0xFA, // cli\r
- 0xF4, // hlt\r
- 0xEB, 0xFC // jmp $-2\r
- };\r
+UINT8 *mApHltLoopCode = NULL;\r
+UINT8 mApHltLoopCodeTemplate[] = {\r
+ 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]\r
+ 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]\r
+ 0xFA, // cli\r
+ 0xF4, // hlt\r
+ 0xEB, 0xFC // jmp $-2\r
+};\r
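\r
//\r
// A host-side model of the 11-byte template above, for illustration only (assumes a\r
// GCC-style compiler; ApHltLoopModel and its parameter are hypothetical names): the\r
// AP reads the counter address passed on its stack, atomically decrements it so the\r
// BSP can observe completion, then parks with interrupts masked.\r
//\r
#if 0  // illustrative sketch, not part of the driver\r
static void\r
ApHltLoopModel (volatile unsigned int  *NumberToFinish)\r
{\r
  __atomic_fetch_sub (NumberToFinish, 1, __ATOMIC_SEQ_CST);  // lock dec dword ptr [eax]\r
  for ( ; ; ) {\r
    // cli; hlt; jmp $-2 -- halt forever; only an INIT or SMI can disturb the AP now\r
  }\r
}\r
#endif\r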
\r
/**\r
Sync up the MTRR values for all processors.\r
VOID\r
EFIAPI\r
LoadMtrrData (\r
- EFI_PHYSICAL_ADDRESS MtrrTable\r
+ EFI_PHYSICAL_ADDRESS MtrrTable\r
)\r
+\r
/*++\r
\r
Routine Description:\r
\r
--*/\r
{\r
- MTRR_SETTINGS *MtrrSettings;\r
+ MTRR_SETTINGS *MtrrSettings;\r
\r
- MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
+ MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;\r
MtrrSetAllMtrrs (MtrrSettings);\r
}\r
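\r
//\r
// For context, a minimal sketch (assuming the normal-boot flow, which owns\r
// mAcpiCpuData.MtrrTable and links MtrrLib) of how the table replayed above is\r
// captured before entering S3, using MtrrLib's MtrrGetAllMtrrs ():\r
//\r
#if 0  // illustrative sketch, not part of the driver\r
  MTRR_SETTINGS  *MtrrSettings;\r
\r
  MtrrSettings = (MTRR_SETTINGS *)(UINTN)mAcpiCpuData.MtrrTable;\r
  MtrrGetAllMtrrs (MtrrSettings);  // BSP snapshot; LoadMtrrData () replays it on every CPU at resume\r
#endif\r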
\r
**/\r
VOID\r
S3ReleaseSemaphore (\r
- IN OUT volatile UINT32 *Sem\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
InterlockedIncrement (Sem);\r
**/\r
VOID\r
S3WaitForSemaphore (\r
- IN OUT volatile UINT32 *Sem\r
+ IN OUT volatile UINT32 *Sem\r
)\r
{\r
UINT32 Value;\r
**/\r
EFI_STATUS\r
ReadWriteCr (\r
- IN UINT32 CrIndex,\r
- IN BOOLEAN Read,\r
- IN OUT UINTN *CrValue\r
+ IN UINT32 CrIndex,\r
+ IN BOOLEAN Read,\r
+ IN OUT UINTN *CrValue\r
)\r
{\r
switch (CrIndex) {\r
- case 0:\r
- if (Read) {\r
- *CrValue = AsmReadCr0 ();\r
- } else {\r
- AsmWriteCr0 (*CrValue);\r
- }\r
- break;\r
- case 2:\r
- if (Read) {\r
- *CrValue = AsmReadCr2 ();\r
- } else {\r
- AsmWriteCr2 (*CrValue);\r
- }\r
- break;\r
- case 3:\r
- if (Read) {\r
- *CrValue = AsmReadCr3 ();\r
- } else {\r
- AsmWriteCr3 (*CrValue);\r
- }\r
- break;\r
- case 4:\r
- if (Read) {\r
- *CrValue = AsmReadCr4 ();\r
- } else {\r
- AsmWriteCr4 (*CrValue);\r
- }\r
- break;\r
- default:\r
- return EFI_UNSUPPORTED;;\r
+ case 0:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr0 ();\r
+ } else {\r
+ AsmWriteCr0 (*CrValue);\r
+ }\r
+\r
+ break;\r
+ case 2:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr2 ();\r
+ } else {\r
+ AsmWriteCr2 (*CrValue);\r
+ }\r
+\r
+ break;\r
+ case 3:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr3 ();\r
+ } else {\r
+ AsmWriteCr3 (*CrValue);\r
+ }\r
+\r
+ break;\r
+ case 4:\r
+ if (Read) {\r
+ *CrValue = AsmReadCr4 ();\r
+ } else {\r
+ AsmWriteCr4 (*CrValue);\r
+ }\r
+\r
+ break;\r
+ default:\r
+ return EFI_UNSUPPORTED;\r
}\r
\r
return EFI_SUCCESS;\r
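\r
//\r
// Hypothetical usage sketch for the helper above: read CR4, set a bit, and write it\r
// back (the chosen bit is illustrative only):\r
//\r
#if 0  // illustrative sketch, not part of the driver\r
  UINTN  Value;\r
\r
  if (!EFI_ERROR (ReadWriteCr (4, TRUE, &Value))) {  // read CR4\r
    Value |= BIT9;                                   // e.g. CR4.OSFXSR\r
    ReadWriteCr (4, FALSE, &Value);                  // write CR4 back\r
  }\r
#endif\r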
**/\r
VOID\r
ProgramProcessorRegister (\r
- IN CPU_REGISTER_TABLE *RegisterTable,\r
- IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,\r
- IN CPU_STATUS_INFORMATION *CpuStatus,\r
- IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags\r
+ IN CPU_REGISTER_TABLE *RegisterTable,\r
+ IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,\r
+ IN CPU_STATUS_INFORMATION *CpuStatus,\r
+ IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags\r
)\r
{\r
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
//\r
// Traverse Register Table of this logical processor\r
//\r
- RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;\r
+ RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;\r
\r
for (Index = 0; Index < RegisterTable->TableLength; Index++) {\r
-\r
RegisterTableEntry = &RegisterTableEntryHead[Index];\r
\r
//\r
// Check the type of specified register\r
//\r
switch (RegisterTableEntry->RegisterType) {\r
- //\r
- // The specified register is Control Register\r
- //\r
- case ControlRegister:\r
- Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);\r
- if (EFI_ERROR (Status)) {\r
- break;\r
- }\r
- if (RegisterTableEntry->TestThenWrite) {\r
- CurrentValue = BitFieldRead64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1\r
- );\r
- if (CurrentValue == RegisterTableEntry->Value) {\r
+ //\r
+ // The specified register is Control Register\r
+ //\r
+ case ControlRegister:\r
+ Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);\r
+ if (EFI_ERROR (Status)) {\r
break;\r
}\r
- }\r
- Value = (UINTN) BitFieldWrite64 (\r
- Value,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- RegisterTableEntry->Value\r
- );\r
- ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);\r
- break;\r
- //\r
- // The specified register is Model Specific Register\r
- //\r
- case Msr:\r
- if (RegisterTableEntry->TestThenWrite) {\r
- Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);\r
- if (RegisterTableEntry->ValidBitLength >= 64) {\r
- if (Value == RegisterTableEntry->Value) {\r
- break;\r
- }\r
- } else {\r
+\r
+ if (RegisterTableEntry->TestThenWrite) {\r
CurrentValue = BitFieldRead64 (\r
Value,\r
RegisterTableEntry->ValidBitStart,\r
break;\r
}\r
}\r
- }\r
\r
+ Value = (UINTN)BitFieldWrite64 (\r
+ Value,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ RegisterTableEntry->Value\r
+ );\r
+ ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);\r
+ break;\r
//\r
- // If this function is called to restore register setting after INIT signal,\r
- // there is no need to restore MSRs in register table.\r
+ // The specified register is Model Specific Register\r
//\r
- if (RegisterTableEntry->ValidBitLength >= 64) {\r
- //\r
- // If length is not less than 64 bits, then directly write without reading\r
- //\r
- AsmWriteMsr64 (\r
- RegisterTableEntry->Index,\r
- RegisterTableEntry->Value\r
- );\r
- } else {\r
+ case Msr:\r
+ if (RegisterTableEntry->TestThenWrite) {\r
+ Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);\r
+ if (RegisterTableEntry->ValidBitLength >= 64) {\r
+ if (Value == RegisterTableEntry->Value) {\r
+ break;\r
+ }\r
+ } else {\r
+ CurrentValue = BitFieldRead64 (\r
+ Value,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1\r
+ );\r
+ if (CurrentValue == RegisterTableEntry->Value) {\r
+ break;\r
+ }\r
+ }\r
+ }\r
+\r
//\r
- // Set the bit section according to bit start and length\r
+ // If this function is called to restore register setting after INIT signal,\r
+ // there is no need to restore MSRs in register table.\r
//\r
- AsmMsrBitFieldWrite64 (\r
- RegisterTableEntry->Index,\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- RegisterTableEntry->Value\r
- );\r
- }\r
- break;\r
- //\r
- // MemoryMapped operations\r
- //\r
- case MemoryMapped:\r
- AcquireSpinLock (&CpuFlags->MemoryMappedLock);\r
- MmioBitFieldWrite32 (\r
- (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),\r
- RegisterTableEntry->ValidBitStart,\r
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
- (UINT32)RegisterTableEntry->Value\r
- );\r
- ReleaseSpinLock (&CpuFlags->MemoryMappedLock);\r
- break;\r
- //\r
- // Enable or disable cache\r
- //\r
- case CacheControl:\r
- //\r
- // If value of the entry is 0, then disable cache. Otherwise, enable cache.\r
- //\r
- if (RegisterTableEntry->Value == 0) {\r
- AsmDisableCache ();\r
- } else {\r
- AsmEnableCache ();\r
- }\r
- break;\r
+ if (RegisterTableEntry->ValidBitLength >= 64) {\r
+ //\r
+ // If length is not less than 64 bits, then directly write without reading\r
+ //\r
+ AsmWriteMsr64 (\r
+ RegisterTableEntry->Index,\r
+ RegisterTableEntry->Value\r
+ );\r
+ } else {\r
+ //\r
+ // Set the bit section according to bit start and length\r
+ //\r
+ AsmMsrBitFieldWrite64 (\r
+ RegisterTableEntry->Index,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ RegisterTableEntry->Value\r
+ );\r
+ }\r
\r
- case Semaphore:\r
- // Semaphore works logic like below:\r
- //\r
- // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);\r
- // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);\r
- //\r
- // All threads (T0...Tn) waits in P() line and continues running\r
- // together.\r
+ break;\r
//\r
+ // MemoryMapped operations\r
//\r
- // T0 T1 ... Tn\r
+ case MemoryMapped:\r
+ AcquireSpinLock (&CpuFlags->MemoryMappedLock);\r
+ MmioBitFieldWrite32 (\r
+ (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ (UINT32)RegisterTableEntry->Value\r
+ );\r
+ ReleaseSpinLock (&CpuFlags->MemoryMappedLock);\r
+ break;\r
//\r
- // V(0...n) V(0...n) ... V(0...n)\r
- // n * P(0) n * P(1) ... n * P(n)\r
+ // Enable or disable cache\r
//\r
- ASSERT (\r
- (ApLocation != NULL) &&\r
- (CpuStatus->ThreadCountPerPackage != 0) &&\r
- (CpuStatus->ThreadCountPerCore != 0) &&\r
- (CpuFlags->CoreSemaphoreCount != NULL) &&\r
- (CpuFlags->PackageSemaphoreCount != NULL)\r
- );\r
- switch (RegisterTableEntry->Value) {\r
- case CoreDepType:\r
- SemaphorePtr = CpuFlags->CoreSemaphoreCount;\r
- ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;\r
-\r
- CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;\r
- //\r
- // Get Offset info for the first thread in the core which current thread belongs to.\r
- //\r
- FirstThread = CurrentCore * CpuStatus->MaxThreadCount;\r
- CurrentThread = FirstThread + ApLocation->Thread;\r
-\r
- //\r
- // Different cores may have different valid threads in them. If driver maintail clearly\r
- // thread index in different cores, the logic will be much complicated.\r
- // Here driver just simply records the max thread number in all cores and use it as expect\r
- // thread number for all cores.\r
- // In below two steps logic, first current thread will Release semaphore for each thread\r
- // in current core. Maybe some threads are not valid in this core, but driver don't\r
- // care. Second, driver will let current thread wait semaphore for all valid threads in\r
- // current core. Because only the valid threads will do release semaphore for this\r
- // thread, driver here only need to wait the valid thread count.\r
- //\r
-\r
- //\r
- // First Notify ALL THREADs in current Core that this thread is ready.\r
+ case CacheControl:\r
//\r
- for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {\r
- S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
- }\r
- //\r
- // Second, check whether all VALID THREADs (not all threads) in current core are ready.\r
+ // If value of the entry is 0, then disable cache. Otherwise, enable cache.\r
//\r
- for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) {\r
- S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ if (RegisterTableEntry->Value == 0) {\r
+ AsmDisableCache ();\r
+ } else {\r
+ AsmEnableCache ();\r
}\r
+\r
break;\r
\r
- case PackageDepType:\r
- SemaphorePtr = CpuFlags->PackageSemaphoreCount;\r
- ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;\r
+ case Semaphore:\r
+        // The semaphore logic works as follows:\r
//\r
- // Get Offset info for the first thread in the package which current thread belongs to.\r
+ // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);\r
+ // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);\r
//\r
- FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;\r
+        // All threads (T0...Tn) wait at the P() line and then continue running\r
+        // together.\r
//\r
- // Get the possible threads count for current package.\r
//\r
- CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;\r
-\r
+ // T0 T1 ... Tn\r
//\r
- // Different packages may have different valid threads in them. If driver maintail clearly\r
- // thread index in different packages, the logic will be much complicated.\r
- // Here driver just simply records the max thread number in all packages and use it as expect\r
- // thread number for all packages.\r
- // In below two steps logic, first current thread will Release semaphore for each thread\r
- // in current package. Maybe some threads are not valid in this package, but driver don't\r
- // care. Second, driver will let current thread wait semaphore for all valid threads in\r
- // current package. Because only the valid threads will do release semaphore for this\r
- // thread, driver here only need to wait the valid thread count.\r
+ // V(0...n) V(0...n) ... V(0...n)\r
+ // n * P(0) n * P(1) ... n * P(n)\r
//\r
+ ASSERT (\r
+ (ApLocation != NULL) &&\r
+ (CpuStatus->ThreadCountPerPackage != 0) &&\r
+ (CpuStatus->ThreadCountPerCore != 0) &&\r
+ (CpuFlags->CoreSemaphoreCount != NULL) &&\r
+ (CpuFlags->PackageSemaphoreCount != NULL)\r
+ );\r
+ switch (RegisterTableEntry->Value) {\r
+ case CoreDepType:\r
+ SemaphorePtr = CpuFlags->CoreSemaphoreCount;\r
+ ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;\r
+\r
+ CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;\r
+ //\r
+            // Get the offset of the first thread in the core to which the current thread belongs.\r
+ //\r
+ FirstThread = CurrentCore * CpuStatus->MaxThreadCount;\r
+ CurrentThread = FirstThread + ApLocation->Thread;\r
+\r
+ //\r
+            // Different cores may contain different numbers of valid threads. If the driver\r
+            // tracked the exact thread indexes per core, the logic would be much more\r
+            // complicated, so it simply records the maximum thread count across all cores\r
+            // and uses it as the expected thread count for every core.\r
+            // In the two steps below, the current thread first releases the semaphore of\r
+            // every thread slot in the current core; some slots may not hold valid threads,\r
+            // but that is harmless. Second, the current thread waits on its own semaphore\r
+            // once per valid thread in this core. Because only valid threads release this\r
+            // thread's semaphore, waiting for the valid thread count is sufficient.\r
+ //\r
+\r
+ //\r
+ // First Notify ALL THREADs in current Core that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+\r
+ //\r
+ // Second, check whether all VALID THREADs (not all threads) in current core are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
\r
- //\r
- // First Notify ALL THREADS in current package that this thread is ready.\r
- //\r
- for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) {\r
- S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
- }\r
- //\r
- // Second, check whether VALID THREADS (not all threads) in current package are ready.\r
- //\r
- for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) {\r
- S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ break;\r
+\r
+ case PackageDepType:\r
+ SemaphorePtr = CpuFlags->PackageSemaphoreCount;\r
+ ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;\r
+ //\r
+            // Get the offset of the first thread in the package to which the current thread belongs.\r
+ //\r
+ FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;\r
+ //\r
+            // Compute the current thread's index within the package.\r
+ //\r
+ CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;\r
+\r
+ //\r
+            // Different packages may contain different numbers of valid threads. If the\r
+            // driver tracked the exact thread indexes per package, the logic would be much\r
+            // more complicated, so it simply records the maximum thread count across all\r
+            // packages and uses it as the expected thread count for every package.\r
+            // In the two steps below, the current thread first releases the semaphore of\r
+            // every thread slot in the current package; some slots may not hold valid\r
+            // threads, but that is harmless. Second, the current thread waits on its own\r
+            // semaphore once per valid thread in this package. Because only valid threads\r
+            // release this thread's semaphore, waiting for the valid thread count is sufficient.\r
+ //\r
+\r
+ //\r
+ // First Notify ALL THREADS in current package that this thread is ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {\r
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);\r
+ }\r
+\r
+ //\r
+ // Second, check whether VALID THREADS (not all threads) in current package are ready.\r
+ //\r
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {\r
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);\r
+ }\r
+\r
+ break;\r
+\r
+ default:\r
+ break;\r
}\r
+\r
break;\r
\r
default:\r
break;\r
- }\r
- break;\r
-\r
- default:\r
- break;\r
}\r
}\r
}\r
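\r
//\r
// A self-contained model of the Semaphore rendezvous above (an illustration using\r
// C11 <stdatomic.h>, not EDK2 code; all names are hypothetical). Every thread first\r
// performs V() on each thread slot of its group, then performs P() on its own slot\r
// once per valid thread, so all valid threads leave the barrier together:\r
//\r
#if 0  // illustrative sketch, not part of the driver\r
#include <stdatomic.h>\r
\r
static void\r
ModelRelease (atomic_uint  *Sem)  // V(): mirrors S3ReleaseSemaphore\r
{\r
  atomic_fetch_add (Sem, 1);\r
}\r
\r
static void\r
ModelWait (atomic_uint  *Sem)     // P(): mirrors S3WaitForSemaphore\r
{\r
  unsigned int  Value;\r
\r
  for ( ; ; ) {\r
    Value = atomic_load (Sem);\r
    if ((Value > 0) && atomic_compare_exchange_weak (Sem, &Value, Value - 1)) {\r
      return;\r
    }\r
  }\r
}\r
\r
static void\r
CoreRendezvousModel (\r
  atomic_uint   *Sem,             // one slot per possible thread, all starting at 0\r
  unsigned int  FirstThread,      // first slot of this thread's core\r
  unsigned int  MyThread,         // this thread's slot offset within the core\r
  unsigned int  MaxThreadCount,   // possible slots per core (may exceed valid threads)\r
  unsigned int  ValidThreadCount  // valid threads in this core\r
  )\r
{\r
  unsigned int  Index;\r
\r
  for (Index = 0; Index < MaxThreadCount; Index++) {    // V(0...n): signal every slot, valid or not\r
    ModelRelease (&Sem[FirstThread + Index]);\r
  }\r
\r
  for (Index = 0; Index < ValidThreadCount; Index++) {  // n * P(self): only valid threads signal us\r
    ModelWait (&Sem[FirstThread + MyThread]);\r
  }\r
}\r
#endif\r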
**/\r
VOID\r
SetRegister (\r
- IN BOOLEAN PreSmmRegisterTable\r
+ IN BOOLEAN PreSmmRegisterTable\r
)\r
{\r
- CPU_FEATURE_INIT_DATA *FeatureInitData;\r
- CPU_REGISTER_TABLE *RegisterTable;\r
- CPU_REGISTER_TABLE *RegisterTables;\r
- UINT32 InitApicId;\r
- UINTN ProcIndex;\r
- UINTN Index;\r
+ CPU_FEATURE_INIT_DATA *FeatureInitData;\r
+ CPU_REGISTER_TABLE *RegisterTable;\r
+ CPU_REGISTER_TABLE *RegisterTables;\r
+ UINT32 InitApicId;\r
+ UINTN ProcIndex;\r
+ UINTN Index;\r
\r
FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;\r
\r
} else {\r
RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;\r
}\r
+\r
if (RegisterTables == NULL) {\r
return;\r
}\r
\r
- InitApicId = GetInitialApicId ();\r
+ InitApicId = GetInitialApicId ();\r
RegisterTable = NULL;\r
- ProcIndex = (UINTN)-1;\r
+ ProcIndex = (UINTN)-1;\r
for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
if (RegisterTables[Index].InitialApicId == InitApicId) {\r
RegisterTable = &RegisterTables[Index];\r
- ProcIndex = Index;\r
+ ProcIndex = Index;\r
break;\r
}\r
}\r
+\r
ASSERT (RegisterTable != NULL);\r
\r
if (FeatureInitData->ApLocation != 0) {\r
VOID\r
)\r
{\r
- UINTN TopOfStack;\r
- UINT8 Stack[128];\r
+ UINTN TopOfStack;\r
+ UINT8 Stack[128];\r
\r
LoadMtrrData (mAcpiCpuData.MtrrTable);\r
\r
//\r
  // Place the AP into the safe code and count down the number there with a lock mechanism.\r
//\r
- TopOfStack = (UINTN) Stack + sizeof (Stack);\r
- TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
- CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
+ TopOfStack = (UINTN)Stack + sizeof (Stack);\r
+ TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);\r
+ CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
}\r
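\r
//\r
// Worked example of the alignment step above, assuming CPU_STACK_ALIGNMENT is a power\r
// of two (for instance 16, so the mask is ~0xF): rounding down keeps the aligned top\r
// of stack inside the local Stack[] buffer.\r
//\r
//   TopOfStack  = 0x1139;              // (UINTN)Stack + sizeof (Stack), unaligned\r
//   TopOfStack &= ~(UINTN)(16 - 1);    // -> 0x1130, 16-byte aligned\r
//\r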
\r
EFI_PHYSICAL_ADDRESS WorkingBuffer\r
)\r
{\r
- EFI_PHYSICAL_ADDRESS StartupVector;\r
- MP_ASSEMBLY_ADDRESS_MAP AddressMap;\r
+ EFI_PHYSICAL_ADDRESS StartupVector;\r
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;\r
\r
//\r
// Get the address map of startup code for AP,\r
// Copy AP startup code to startup vector, and then redirect the long jump\r
// instructions for mode switching.\r
//\r
- CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);\r
- *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);\r
+ CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);\r
+ *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);\r
if (AddressMap.LongJumpOffset != 0) {\r
- *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);\r
+ *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);\r
}\r
\r
//\r
// Get the start address of exchange data between BSP and AP.\r
//\r
- mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);\r
- ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));\r
+ mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);\r
+ ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));\r
\r
- CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
- CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+ CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+ CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
\r
- mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;\r
- mExchangeInfo->StackSize = mAcpiCpuData.StackSize;\r
- mExchangeInfo->BufferStart = (UINT32) StartupVector;\r
- mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());\r
+ mExchangeInfo->StackStart = (VOID *)(UINTN)mAcpiCpuData.StackAddress;\r
+ mExchangeInfo->StackSize = mAcpiCpuData.StackSize;\r
+ mExchangeInfo->BufferStart = (UINT32)StartupVector;\r
+ mExchangeInfo->Cr3 = (UINT32)(AsmReadCr3 ());\r
mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;\r
}\r
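\r
//\r
// A rough C model of how each AP consumes the exchange area prepared above (an\r
// illustration only; the real consumer is the rendezvous funnel assembly, and\r
// ApStartupModel/ApIndex plus the per-AP stack formula are assumptions):\r
//\r
#if 0  // illustrative sketch, not part of the driver\r
static void\r
ApStartupModel (MP_CPU_EXCHANGE_INFO  *Info, UINTN  ApIndex)\r
{\r
  //\r
  // The stub loads GdtrProfile/IdtrProfile and Cr3 so that descriptors and paging\r
  // match the BSP, then carves a private stack out of the common block, e.g.:\r
  //   StackTop = (UINTN)Info->StackStart + (ApIndex + 1) * Info->StackSize;\r
  //\r
  ((VOID (*)(VOID))Info->InitializeFloatingPointUnitsAddress)();  // init x87/SSE state\r
  ((VOID (*)(VOID))Info->ApFunction)();                           // InitializeAp, published by the BSP\r
}\r
#endif\r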
\r
} else {\r
ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);\r
}\r
- mNumberToFinish = (UINT32)(mNumberOfCpus - 1);\r
- mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;\r
+\r
+ mNumberToFinish = (UINT32)(mNumberOfCpus - 1);\r
+ mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;\r
\r
//\r
  // Execute code that must run before SMM base relocation. Note: this flag is maintained across S3 boots.\r
} else {\r
ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);\r
}\r
+\r
mNumberToFinish = (UINT32)(mNumberOfCpus - 1);\r
\r
//\r
VOID\r
)\r
{\r
- SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
- IA32_DESCRIPTOR Ia32Idtr;\r
- IA32_DESCRIPTOR X64Idtr;\r
- IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
- EFI_STATUS Status;\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ IA32_DESCRIPTOR Ia32Idtr;\r
+ IA32_DESCRIPTOR X64Idtr;\r
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
+ EFI_STATUS Status;\r
\r
DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));\r
\r
SmmS3ResumeState = mSmmS3ResumeState;\r
ASSERT (SmmS3ResumeState != NULL);\r
\r
- if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
+ //\r
+  // Set up a 64-bit IDT in the 64-bit SMM environment when entered from 32-bit PEI.\r
+  // Note: the combination of 64-bit PEI with 32-bit DXE is not supported.\r
+ //\r
+ if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) && (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == TRUE)) {\r
//\r
// Save the IA32 IDT Descriptor\r
//\r
- AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r
\r
//\r
// Setup X64 IDT table\r
//\r
ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
- X64Idtr.Base = (UINTN) IdtEntryTable;\r
- X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
- AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
+ X64Idtr.Base = (UINTN)IdtEntryTable;\r
+ X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);\r
\r
//\r
// Setup the default exception handler\r
}\r
\r
//\r
- // Restore SMBASE for BSP and all APs\r
+ // Make sure the gSmmBaseHobGuid existence status is the same between normal and S3 boot.\r
//\r
- SmmRelocateBases ();\r
+ ASSERT (mSmmRelocated == (BOOLEAN)(GetFirstGuidHob (&gSmmBaseHobGuid) != NULL));\r
+ if (mSmmRelocated != (BOOLEAN)(GetFirstGuidHob (&gSmmBaseHobGuid) != NULL)) {\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "gSmmBaseHobGuid %a produced in normal boot but %a in S3 boot!",\r
+ mSmmRelocated ? "is" : "is not",\r
+ mSmmRelocated ? "is not" : "is"\r
+ ));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ //\r
+  // Check whether SMM base relocation has already been done.\r
+  // If not, perform the SMM base relocation here.\r
+ //\r
+ if (!mSmmRelocated) {\r
+ //\r
+ // Restore SMBASE for BSP and all APs\r
+ //\r
+ SmmRelocateBases ();\r
+ } else {\r
+ //\r
+ // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.\r
+ //\r
+ ExecuteFirstSmiInit ();\r
+ }\r
\r
//\r
// Skip initialization if mAcpiCpuData is not valid\r
//\r
mRestoreSmmConfigurationInS3 = TRUE;\r
\r
- DEBUG (( DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
- DEBUG (( DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
- DEBUG (( DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
- DEBUG (( DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
- DEBUG (( DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
+ DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
\r
//\r
- // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
+  // If SMM is in 32-bit mode, or PcdDxeIplSwitchToLongMode is FALSE, use SwitchStack() to resume the PEI phase.\r
+  // Note: the combination of 64-bit PEI with 32-bit DXE is not supported.\r
//\r
- if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
+ if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) || (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == FALSE)) {\r
DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
\r
SwitchStack (\r
//\r
// Restore IA32 IDT table\r
//\r
- AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r
AsmDisablePaging64 (\r
SmmS3ResumeState->ReturnCs,\r
(UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
IN UINT32 Cr3\r
)\r
{\r
- VOID *GuidHob;\r
- EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
- SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
- EFI_PHYSICAL_ADDRESS Address;\r
- EFI_STATUS Status;\r
+ VOID *GuidHob;\r
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
+ EFI_STATUS Status;\r
\r
if (!mAcpiS3Enable) {\r
return;\r
"ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",\r
__FUNCTION__,\r
&gEfiAcpiVariableGuid\r
- ));\r
+ ));\r
CpuDeadLoop ();\r
} else {\r
- SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);\r
\r
DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
\r
- mSmmS3ResumeState = SmmS3ResumeState;\r
+ mSmmS3ResumeState = SmmS3ResumeState;\r
SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
\r
SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
if (sizeof (UINTN) == sizeof (UINT64)) {\r
SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
}\r
+\r
if (sizeof (UINTN) == sizeof (UINT32)) {\r
SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
}\r
&Address\r
);\r
ASSERT_EFI_ERROR (Status);\r
- mApHltLoopCode = (UINT8 *) (UINTN) Address;\r
+ mApHltLoopCode = (UINT8 *)(UINTN)Address;\r
}\r
\r
/**\r
**/\r
VOID\r
CopyRegisterTable (\r
- IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
- IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
- IN UINT32 NumberOfCpus\r
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
+ IN UINT32 NumberOfCpus\r
)\r
{\r
- UINTN Index;\r
- CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
+ UINTN Index;\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
\r
CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
for (Index = 0; Index < NumberOfCpus; Index++) {\r
if (DestinationRegisterTableList[Index].TableLength != 0) {\r
DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);\r
- RegisterTableEntry = AllocateCopyPool (\r
- DestinationRegisterTableList[Index].AllocatedSize,\r
- (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
- );\r
+ RegisterTableEntry = AllocateCopyPool (\r
+ DestinationRegisterTableList[Index].AllocatedSize,\r
+ (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
+ );\r
ASSERT (RegisterTableEntry != NULL);\r
DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
}\r
**/\r
BOOLEAN\r
IsRegisterTableEmpty (\r
- IN CPU_REGISTER_TABLE *RegisterTable,\r
- IN UINT32 NumberOfCpus\r
+ IN CPU_REGISTER_TABLE *RegisterTable,\r
+ IN UINT32 NumberOfCpus\r
)\r
{\r
- UINTN Index;\r
+ UINTN Index;\r
\r
if (RegisterTable != NULL) {\r
for (Index = 0; Index < NumberOfCpus; Index++) {\r
**/\r
VOID\r
CopyCpuFeatureInitDatatoSmram (\r
- IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,\r
- IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc\r
+ IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,\r
+ IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc\r
)\r
{\r
- CPU_STATUS_INFORMATION *CpuStatus;\r
+ CPU_STATUS_INFORMATION *CpuStatus;\r
\r
if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {\r
CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
\r
if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {\r
CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
- sizeof (UINT32) * CpuStatus->PackageCount,\r
- (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage\r
- );\r
+ sizeof (UINT32) * CpuStatus->PackageCount,\r
+ (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage\r
+ );\r
ASSERT (CpuStatus->ThreadCountPerPackage != 0);\r
}\r
\r
if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {\r
CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
- sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),\r
- (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore\r
- );\r
+ sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),\r
+ (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore\r
+ );\r
ASSERT (CpuStatus->ThreadCountPerCore != 0);\r
}\r
\r
if (CpuFeatureInitDataSrc->ApLocation != 0) {\r
CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
- mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
- (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation\r
- );\r
+ mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation\r
+ );\r
ASSERT (CpuFeatureInitDataDst->ApLocation != 0);\r
}\r
}\r
VOID\r
)\r
{\r
- ACPI_CPU_DATA *AcpiCpuData;\r
- IA32_DESCRIPTOR *Gdtr;\r
- IA32_DESCRIPTOR *Idtr;\r
- VOID *GdtForAp;\r
- VOID *IdtForAp;\r
- VOID *MachineCheckHandlerForAp;\r
- CPU_STATUS_INFORMATION *CpuStatus;\r
+ ACPI_CPU_DATA *AcpiCpuData;\r
+ IA32_DESCRIPTOR *Gdtr;\r
+ IA32_DESCRIPTOR *Idtr;\r
+ VOID *GdtForAp;\r
+ VOID *IdtForAp;\r
+ VOID *MachineCheckHandlerForAp;\r
+ CPU_STATUS_INFORMATION *CpuStatus;\r
\r
if (!mAcpiS3Enable) {\r
return;\r
\r
GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
ASSERT (GdtForAp != NULL);\r
- IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
- MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));\r
+ IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
+ MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));\r
\r
CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
\r
- Gdtr->Base = (UINTN)GdtForAp;\r
- Idtr->Base = (UINTN)IdtForAp;\r
+ Gdtr->Base = (UINTN)GdtForAp;\r
+ Idtr->Base = (UINTN)IdtForAp;\r
mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;\r
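\r
  //\r
  // Resulting layout of the single pool allocation carved up above:\r
  //\r
  //   GdtForAp                  IdtForAp                MachineCheckHandlerForAp\r
  //   |<- Gdtr->Limit + 1 ->|<- Idtr->Limit + 1 ->|<- ApMachineCheckHandlerSize ->|\r
  //\r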
\r
ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));\r
);\r
ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);\r
\r
- InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);\r
+ InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);\r
}\r
}\r
\r