--- /dev/null
+/** @file\r
+Code for Processor S3 restoration\r
+\r
+Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+//
+// Data exchanged between the BSP and the APs, placed in memory directly
+// after the copied AP startup code (see PrepareApStartupVector).
+// NOTE(review): the layout is presumably shared with the AP startup
+// assembly code -- confirm before reordering fields.
+//
+typedef struct {
+  UINTN Lock;                    // presumably a synchronization cell used by the startup assembly (not referenced from C here)
+  VOID *StackStart;              // base of the AP stack area; reset to mAcpiCpuData.StackAddress before each wake-up
+  UINTN StackSize;               // size in bytes of the AP stack area
+  VOID *ApFunction;              // C routine the APs execute after waking
+  IA32_DESCRIPTOR GdtrProfile;   // GDT descriptor for APs
+  IA32_DESCRIPTOR IdtrProfile;   // IDT descriptor for APs
+  UINT32 BufferStart;            // physical address of the startup vector buffer
+  UINT32 Cr3;                    // BSP's CR3 value, captured in PrepareApStartupVector
+} MP_CPU_EXCHANGE_INFO;
+\r
+//
+// Address map of the AP rendezvous (startup) code, reported by
+// AsmGetAddressMap and used by PrepareApStartupVector to copy and patch it.
+//
+typedef struct {
+  UINT8 *RendezvousFunnelAddress;   // start of the AP startup code image to copy
+  UINTN PModeEntryOffset;           // offset of the protected-mode entry point
+  UINTN FlatJumpOffset;             // offset of the jump patched to target PModeEntryOffset
+  UINTN Size;                       // total size in bytes of the startup code
+  UINTN LModeEntryOffset;           // offset of the long-mode entry point
+  UINTN LongJumpOffset;             // offset of the jump patched to target LModeEntryOffset; 0 when not present
+} MP_ASSEMBLY_ADDRESS_MAP;
+\r
+/**
+  Get starting address and size of the rendezvous entry for APs.
+  Information for fixing a jump instruction in the code is also returned.
+
+  @param AddressMap Output buffer for address map information.
+
+  @return Presumably a pointer related to the rendezvous code; the return
+          value is not used in this file -- confirm against the assembly
+          implementation.
+**/
+VOID *
+EFIAPI
+AsmGetAddressMap (
+  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
+  );
+\r
+//
+// Legacy region just below 0xA0000, reserved for the AP startup vector.
+//
+#define LEGACY_REGION_SIZE (2 * 0x1000)
+#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
+//
+// Initial number of entries in the MSR spin lock table; grown on demand
+// by InitMsrSpinLockByIndex.
+//
+#define MSR_SPIN_LOCK_INIT_NUM 15
+
+ACPI_CPU_DATA mAcpiCpuData;                        // CPU configuration saved by the normal boot path
+UINT32 mNumberToFinish;                            // count of APs still in their rendezvous procedure;
+                                                   // decremented by each AP, polled by the BSP
+MP_CPU_EXCHANGE_INFO *mExchangeInfo;               // BSP/AP mailbox inside the startup buffer
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;      // NOTE(review): not referenced in this file --
+                                                   // presumably consumed elsewhere in the driver
+VOID *mGdtForAp = NULL;                            // SMRAM copy of the AP GDT image
+VOID *mIdtForAp = NULL;                            // SMRAM copy of the AP IDT image
+VOID *mMachineCheckHandlerForAp = NULL;            // SMRAM copy of the AP machine check handler
+MP_MSR_LOCK *mMsrSpinLocks = NULL;                 // table of per-MSR spin locks
+UINTN mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;  // current capacity of mMsrSpinLocks
+UINTN mMsrCount = 0;                               // number of registered MSR locks
+\r
+/**
+  Look up the spin lock that guards programming of a given MSR.
+
+  @param MsrIndex MSR index value.
+
+  @return Pointer to the matching MSR spin lock, or NULL when no lock has
+          been registered for this MSR yet.
+
+**/
+SPIN_LOCK *
+GetMsrSpinLockByIndex (
+  IN UINT32 MsrIndex
+  )
+{
+  UINTN LockIndex;
+
+  LockIndex = 0;
+  while (LockIndex < mMsrCount) {
+    if (mMsrSpinLocks[LockIndex].MsrIndex == MsrIndex) {
+      return &mMsrSpinLocks[LockIndex].SpinLock;
+    }
+    LockIndex++;
+  }
+  return NULL;
+}
+\r
+/**
+  Initialize MSR spin lock by MSR index.
+
+  Allocates the spin lock table on first use and enlarges it when full.
+  Registering an MSR that already has a lock is a no-op.
+
+  @param MsrIndex MSR index value.
+
+**/
+VOID
+InitMsrSpinLockByIndex (
+  IN UINT32 MsrIndex
+  )
+{
+  UINTN NewMsrSpinLockCount;
+
+  if (mMsrSpinLocks == NULL) {
+    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);
+    ASSERT (mMsrSpinLocks != NULL);
+  }
+  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
+    //
+    // Initialize spin lock for MSR programming
+    //
+    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
+    InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);
+    mMsrCount ++;
+    if (mMsrCount == mMsrSpinLockCount) {
+      //
+      // If MSR spin lock buffer is full, enlarge it
+      //
+      NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;
+      mMsrSpinLocks = ReallocatePool (
+                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
+                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
+                        mMsrSpinLocks
+                        );
+      //
+      // ReallocatePool () returns NULL on allocation failure and the old
+      // buffer pointer has already been overwritten, so fail fast here
+      // rather than dereferencing NULL on the next registration.
+      //
+      ASSERT (mMsrSpinLocks != NULL);
+      mMsrSpinLockCount = NewMsrSpinLockCount;
+    }
+  }
+}
+\r
+/**
+  Sync up the MTRR values for all processors.
+
+  Loads the MTRR settings saved by the normal boot path so that the calling
+  processor programs an MTRR configuration identical to the other processors.
+
+  @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
+**/
+VOID
+EFIAPI
+LoadMtrrData (
+  EFI_PHYSICAL_ADDRESS       MtrrTable
+  )
+{
+  MTRR_SETTINGS   *MtrrSettings;
+
+  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
+  MtrrSetAllMtrrs (MtrrSettings);
+}
+\r
+/**
+  Programs registers for the calling processor.
+
+  This function programs registers for the calling processor.
+
+  Each entry of the table selects a register class (control register, MSR,
+  or cache control) and describes the bit field to write: bits
+  [ValidBitStart, ValidBitStart + ValidBitLength - 1] are replaced with
+  the entry's Value via read-modify-write.
+
+  @param RegisterTable Pointer to register table of the running processor.
+
+**/
+VOID
+SetProcessorRegister (
+  IN CPU_REGISTER_TABLE        *RegisterTable
+  )
+{
+  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
+  UINTN                     Index;
+  UINTN                     Value;
+  SPIN_LOCK                 *MsrSpinLock;
+
+  //
+  // Traverse Register Table of this logical processor
+  //
+  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
+  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
+    //
+    // Check the type of specified register
+    //
+    switch (RegisterTableEntry->RegisterType) {
+    //
+    // The specified register is Control Register
+    // (Entry->Index selects CR0/CR2/CR3/CR4; unknown indices are ignored)
+    //
+    case ControlRegister:
+      switch (RegisterTableEntry->Index) {
+      case 0:
+        Value = AsmReadCr0 ();
+        Value = (UINTN) BitFieldWrite64 (
+                          Value,
+                          RegisterTableEntry->ValidBitStart,
+                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+                          (UINTN) RegisterTableEntry->Value
+                          );
+        AsmWriteCr0 (Value);
+        break;
+      case 2:
+        Value = AsmReadCr2 ();
+        Value = (UINTN) BitFieldWrite64 (
+                          Value,
+                          RegisterTableEntry->ValidBitStart,
+                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+                          (UINTN) RegisterTableEntry->Value
+                          );
+        AsmWriteCr2 (Value);
+        break;
+      case 3:
+        Value = AsmReadCr3 ();
+        Value = (UINTN) BitFieldWrite64 (
+                          Value,
+                          RegisterTableEntry->ValidBitStart,
+                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+                          (UINTN) RegisterTableEntry->Value
+                          );
+        AsmWriteCr3 (Value);
+        break;
+      case 4:
+        Value = AsmReadCr4 ();
+        Value = (UINTN) BitFieldWrite64 (
+                          Value,
+                          RegisterTableEntry->ValidBitStart,
+                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+                          (UINTN) RegisterTableEntry->Value
+                          );
+        AsmWriteCr4 (Value);
+        break;
+      default:
+        break;
+      }
+      break;
+    //
+    // The specified register is Model Specific Register
+    //
+    case Msr:
+      //
+      // If this function is called to restore register setting after INIT signal,
+      // there is no need to restore MSRs in register table.
+      //
+      if (RegisterTableEntry->ValidBitLength >= 64) {
+        //
+        // If length is not less than 64 bits, then directly write without reading
+        //
+        AsmWriteMsr64 (
+          RegisterTableEntry->Index,
+          RegisterTableEntry->Value
+          );
+      } else {
+        //
+        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
+        // to make sure MSR read/write operation is atomic.
+        //
+        // NOTE(review): GetMsrSpinLockByIndex () returns NULL when the MSR was
+        // never registered via InitMsrSpinLockByIndex () -- confirm that every
+        // partial-width MSR entry is registered before this runs, or that
+        // AcquireSpinLock tolerates NULL.
+        //
+        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
+        AcquireSpinLock (MsrSpinLock);
+        //
+        // Set the bit section according to bit start and length
+        //
+        AsmMsrBitFieldWrite64 (
+          RegisterTableEntry->Index,
+          RegisterTableEntry->ValidBitStart,
+          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+          RegisterTableEntry->Value
+          );
+        ReleaseSpinLock (MsrSpinLock);
+      }
+      break;
+    //
+    // Enable or disable cache
+    //
+    case CacheControl:
+      //
+      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
+      //
+      if (RegisterTableEntry->Value == 0) {
+        AsmDisableCache ();
+      } else {
+        AsmEnableCache ();
+      }
+      break;
+
+    default:
+      break;
+    }
+  }
+}
+\r
+/**
+  AP initialization before SMBASE relocation in the S3 boot path.
+**/
+VOID
+EarlyMPRendezvousProcedure (
+  VOID
+  )
+{
+  CPU_REGISTER_TABLE  *TableBase;
+  UINT32              ApicId;
+  UINTN               CpuIndex;
+
+  //
+  // Restore the MTRR configuration saved by the normal boot path.
+  //
+  LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+  //
+  // Locate this AP's pre-SMM-init register table (matched by initial
+  // APIC ID) and program the registers it describes.
+  //
+  TableBase = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
+  ApicId    = GetInitialApicId ();
+  for (CpuIndex = 0; CpuIndex < mAcpiCpuData.NumberOfCpus; CpuIndex++) {
+    if (ApicId == TableBase[CpuIndex].InitialApicId) {
+      SetProcessorRegister (&TableBase[CpuIndex]);
+      break;
+    }
+  }
+
+  //
+  // Signal the BSP that this AP has finished.
+  //
+  InterlockedDecrement (&mNumberToFinish);
+}
+\r
+/**
+  AP initialization after SMBASE relocation in the S3 boot path.
+**/
+VOID
+MPRendezvousProcedure (
+  VOID
+  )
+{
+  CPU_REGISTER_TABLE  *TableBase;
+  UINT32              ApicId;
+  UINTN               CpuIndex;
+
+  //
+  // Put the local APIC into virtual wire mode and mask LVT interrupts.
+  //
+  ProgramVirtualWireMode ();
+  DisableLvtInterrupts ();
+
+  //
+  // Program the register table recorded for this AP (matched by initial
+  // APIC ID) during the normal boot path.
+  //
+  TableBase = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
+  ApicId    = GetInitialApicId ();
+  for (CpuIndex = 0; CpuIndex < mAcpiCpuData.NumberOfCpus; CpuIndex++) {
+    if (ApicId == TableBase[CpuIndex].InitialApicId) {
+      SetProcessorRegister (&TableBase[CpuIndex]);
+      break;
+    }
+  }
+
+  //
+  // Signal the BSP that this AP has finished.
+  //
+  InterlockedDecrement (&mNumberToFinish);
+}
+\r
+/**
+  Prepares startup vector for APs.
+
+  This function prepares startup vector for APs.
+
+  Copies the AP rendezvous code into the working buffer, patches the mode
+  switch jump targets to the buffer's actual address, and fills in the
+  BSP/AP exchange area placed immediately after the copied code.
+
+  @param WorkingBuffer The address of the work buffer.
+**/
+VOID
+PrepareApStartupVector (
+  EFI_PHYSICAL_ADDRESS  WorkingBuffer
+  )
+{
+  EFI_PHYSICAL_ADDRESS                        StartupVector;
+  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;
+
+  //
+  // Get the address map of startup code for AP,
+  // including code size, and offset of long jump instructions to redirect.
+  //
+  ZeroMem (&AddressMap, sizeof (AddressMap));
+  AsmGetAddressMap (&AddressMap);
+
+  StartupVector = WorkingBuffer;
+
+  //
+  // Copy AP startup code to startup vector, and then redirect the long jump
+  // instructions for mode switching.
+  //
+  // NOTE(review): the "+ 3" / "+ 2" constants are presumably the byte offsets
+  // of the 32-bit operand within the jump instructions -- confirm against the
+  // assembly that implements the rendezvous code.
+  //
+  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
+  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
+  if (AddressMap.LongJumpOffset != 0) {
+    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
+  }
+
+  //
+  // Get the start address of exchange data between BSP and AP.
+  // The exchange area lives directly after the copied startup code.
+  //
+  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
+  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));
+
+  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
+  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+  //
+  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
+  // (a descriptor Limit is "size - 1", hence the "+ 1" byte counts).
+  //
+  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
+  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
+  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);
+
+  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
+  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
+  mExchangeInfo->BufferStart = (UINT32) StartupVector;
+  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
+}
+\r
+/**
+  The function is invoked before SMBASE relocation in S3 path to restores CPU status.
+
+  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
+  and restores MTRRs for both BSP and APs.
+
+**/
+VOID
+EarlyInitializeCpu (
+  VOID
+  )
+{
+  CPU_REGISTER_TABLE         *RegisterTableList;
+  UINT32                     InitApicId;
+  UINTN                      Index;
+
+  // Restore the MTRR configuration saved by the normal boot path.
+  LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+  //
+  // Find processor number for this CPU.
+  //
+  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
+  InitApicId = GetInitialApicId ();
+  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+    if (RegisterTableList[Index].InitialApicId == InitApicId) {
+      SetProcessorRegister (&RegisterTableList[Index]);
+      break;
+    }
+  }
+
+  ProgramVirtualWireMode ();
+
+  // Stage the AP startup code and the BSP/AP exchange area.
+  PrepareApStartupVector (mAcpiCpuData.StartupVector);
+
+  // All processors except this BSP must report in.
+  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
+  mExchangeInfo->ApFunction  = (VOID *) (UINTN) EarlyMPRendezvousProcedure;
+
+  //
+  // Send INIT IPI - SIPI to all APs
+  //
+  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
+
+  //
+  // Busy-wait until every AP has decremented mNumberToFinish.
+  // NOTE(review): mNumberToFinish is not declared volatile although APs
+  // update it concurrently via InterlockedDecrement () -- confirm the
+  // compiler re-reads it on each loop iteration.
+  //
+  while (mNumberToFinish > 0) {
+    CpuPause ();
+  }
+}
+\r
+/**
+  The function is invoked after SMBASE relocation in S3 path to restores CPU status.
+
+  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
+  data saved by normal boot path for both BSP and APs.
+
+**/
+VOID
+InitializeCpu (
+  VOID
+  )
+{
+  CPU_REGISTER_TABLE         *RegisterTableList;
+  UINT32                     InitApicId;
+  UINTN                      Index;
+
+  //
+  // Program this BSP's register table (matched by initial APIC ID).
+  //
+  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
+  InitApicId = GetInitialApicId ();
+  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+    if (RegisterTableList[Index].InitialApicId == InitApicId) {
+      SetProcessorRegister (&RegisterTableList[Index]);
+      break;
+    }
+  }
+
+  // All processors except this BSP must report in.
+  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
+  //
+  // StackStart was updated when APs were waken up in EarlyInitializeCpu.
+  // Re-initialize StackAddress to original beginning address.
+  //
+  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
+  mExchangeInfo->ApFunction  = (VOID *) (UINTN) MPRendezvousProcedure;
+
+  //
+  // Send INIT IPI - SIPI to all APs
+  //
+  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
+
+  //
+  // Busy-wait until every AP has decremented mNumberToFinish.
+  // NOTE(review): see the matching loop in EarlyInitializeCpu regarding
+  // the missing volatile qualifier on mNumberToFinish.
+  //
+  while (mNumberToFinish > 0) {
+    CpuPause ();
+  }
+}
--- /dev/null
+/** @file\r
+Implementation of SMM CPU Services Protocol.\r
+\r
+Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+//
+// SMM CPU Service Protocol instance.
+// Installed on a handle by InitializeSmmCpuServices ().
+//
+EFI_SMM_CPU_SERVICE_PROTOCOL  mSmmCpuService = {
+  SmmGetProcessorInfo,
+  SmmSwitchBsp,
+  SmmAddProcessor,
+  SmmRemoveProcessor,
+  SmmWhoAmI,
+  SmmRegisterExceptionHandler
+};
+\r
+/**
+  Get Package ID/Core ID/Thread ID of a processor.
+
+  APIC ID must be an initial APIC ID.
+
+  The algorithm below assumes the target system has symmetry across physical package boundaries
+  with respect to the number of logical processors per package, number of cores per package.
+
+  @param ApicId    APIC ID of the target logical processor.
+  @param Location  Returns the processor location information.
+**/
+VOID
+SmmGetProcessorLocation (
+  IN UINT32 ApicId,
+  OUT EFI_CPU_PHYSICAL_LOCATION *Location
+  )
+{
+  UINTN   ThreadBits;
+  UINTN   CoreBits;
+  UINT32  RegEax;
+  UINT32  RegEbx;
+  UINT32  RegEcx;
+  UINT32  RegEdx;
+  UINT32  MaxCpuIdIndex;
+  UINT32  SubIndex;
+  UINTN   LevelType;
+  UINT32  MaxLogicProcessorsPerPackage;
+  UINT32  MaxCoresPerPackage;
+  BOOLEAN TopologyLeafSupported;
+
+  ASSERT (Location != NULL);
+
+  ThreadBits            = 0;
+  CoreBits              = 0;
+  TopologyLeafSupported = FALSE;
+
+  //
+  // Check if the processor is capable of supporting more than one logical processor.
+  //
+  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
+  ASSERT ((RegEdx & BIT28) != 0);
+
+  //
+  // Assume three-level mapping of APIC ID: Package:Core:SMT.
+  //
+
+  //
+  // Get the max index of basic CPUID
+  //
+  AsmCpuid (CPUID_SIGNATURE, &MaxCpuIdIndex, NULL, NULL, NULL);
+
+  //
+  // If the extended topology enumeration leaf is available, it
+  // is the preferred mechanism for enumerating topology.
+  //
+  if (MaxCpuIdIndex >= CPUID_EXTENDED_TOPOLOGY) {
+    AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, 0, &RegEax, &RegEbx, &RegEcx, NULL);
+    //
+    // If CPUID.(EAX=0BH, ECX=0H):EBX returns zero and maximum input value for
+    // basic CPUID information is greater than 0BH, then CPUID.0BH leaf is not
+    // supported on that processor.
+    //
+    if ((RegEbx & 0xffff) != 0) {
+      TopologyLeafSupported = TRUE;
+
+      //
+      // Sub-leaf index 0 (ECX= 0 as input) provides enumeration parameters to extract
+      // the SMT sub-field of x2APIC ID.
+      //
+      LevelType = (RegEcx >> 8) & 0xff;
+      ASSERT (LevelType == CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_SMT);
+      if ((RegEbx & 0xffff) > 1 ) {
+        ThreadBits = RegEax & 0x1f;
+      } else {
+        //
+        // HT is not supported
+        //
+        ThreadBits = 0;
+      }
+
+      //
+      // Software must not assume any "level type" encoding
+      // value to be related to any sub-leaf index, except sub-leaf 0.
+      //
+      SubIndex = 1;
+      do {
+        AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, SubIndex, &RegEax, NULL, &RegEcx, NULL);
+        LevelType = (RegEcx >> 8) & 0xff;
+        if (LevelType == CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_CORE) {
+          CoreBits = (RegEax & 0x1f) - ThreadBits;
+          break;
+        }
+        SubIndex++;
+      } while (LevelType != CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_INVALID);
+    }
+  }
+
+  if (!TopologyLeafSupported) {
+    AsmCpuid (CPUID_VERSION_INFO, NULL, &RegEbx, NULL, NULL);
+    MaxLogicProcessorsPerPackage = (RegEbx >> 16) & 0xff;
+    if (MaxCpuIdIndex >= CPUID_CACHE_PARAMS) {
+      AsmCpuidEx (CPUID_CACHE_PARAMS, 0, &RegEax, NULL, NULL, NULL);
+      MaxCoresPerPackage = (RegEax >> 26) + 1;
+    } else {
+      //
+      // Must be a single-core processor.
+      //
+      MaxCoresPerPackage = 1;
+    }
+
+    ThreadBits = (UINTN) (HighBitSet32 (MaxLogicProcessorsPerPackage / MaxCoresPerPackage - 1) + 1);
+    CoreBits   = (UINTN) (HighBitSet32 (MaxCoresPerPackage - 1) + 1);
+  }
+
+  //
+  // Build the sub-field masks with an unsigned shift; the previous
+  // "~((-1) << Bits)" form left-shifted a negative value, which is
+  // undefined behavior in C. ThreadBits and CoreBits are less than 32
+  // here (derived from 5-bit CPUID fields or HighBitSet32 () + 1), so
+  // the shifts below are well defined.
+  //
+  Location->Thread  = ApicId & (((UINT32) 1 << ThreadBits) - 1);
+  Location->Core    = (ApicId >> ThreadBits) & (((UINT32) 1 << CoreBits) - 1);
+  Location->Package = (ApicId >> (ThreadBits + CoreBits));
+}
+\r
+/**
+  Gets processor information on the requested processor at the instant this call is made.
+
+  @param[in]  This                 A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in]  ProcessorNumber      The handle number of processor.
+  @param[out] ProcessorInfoBuffer  A pointer to the buffer where information for
+                                   the requested processor is deposited.
+
+  @retval EFI_SUCCESS             Processor information was returned.
+  @retval EFI_INVALID_PARAMETER   ProcessorInfoBuffer is NULL.
+  @retval EFI_INVALID_PARAMETER   ProcessorNumber is invalid.
+  @retval EFI_NOT_FOUND           The processor with the handle specified by
+                                  ProcessorNumber does not exist in the platform.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetProcessorInfo (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  IN UINTN ProcessorNumber,
+  OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer
+  )
+{
+  EFI_PROCESSOR_INFORMATION  *Info;
+
+  //
+  // Validate the output pointer and the handle range before indexing.
+  //
+  if (ProcessorInfoBuffer == NULL || ProcessorNumber >= mMaxNumberOfCpus) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // A slot holding the invalid APIC ID has no processor behind it.
+  //
+  Info = &gSmmCpuPrivate->ProcessorInfo[ProcessorNumber];
+  if (Info->ProcessorId == INVALID_APIC_ID) {
+    return EFI_NOT_FOUND;
+  }
+
+  //
+  // Hand back a copy of the processor's information record.
+  //
+  CopyMem (ProcessorInfoBuffer, Info, sizeof (EFI_PROCESSOR_INFORMATION));
+  return EFI_SUCCESS;
+}
+\r
+/**
+  This service switches the requested AP to be the BSP since the next SMI.
+
+  @param[in] This             A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber  The handle number of AP that is to become the new BSP.
+
+  @retval EFI_SUCCESS             BSP will be switched in next SMI.
+  @retval EFI_UNSUPPORTED         Switching the BSP or a processor to be hot-removed is not supported.
+  @retval EFI_NOT_FOUND           The processor with the handle specified by ProcessorNumber does not exist.
+  @retval EFI_INVALID_PARAMETER   ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmSwitchBsp (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  IN UINTN ProcessorNumber
+  )
+{
+  //
+  // The handle must be in range and refer to a present processor.
+  //
+  if (ProcessorNumber >= mMaxNumberOfCpus) {
+    return EFI_INVALID_PARAMETER;
+  }
+  if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+    return EFI_NOT_FOUND;
+  }
+
+  //
+  // Neither the currently executing CPU nor a processor with a pending
+  // hot-plug operation may become the new BSP.
+  //
+  if (gSmst->CurrentlyExecutingCpu == ProcessorNumber) {
+    return EFI_UNSUPPORTED;
+  }
+  if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Setting of the BSP for next SMI is pending until all SMI handlers are finished
+  //
+  gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuSwitchBsp;
+  return EFI_SUCCESS;
+}
+\r
+/**
+  Notify that a processor was hot-added.
+
+  @param[in]  This             A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in]  ProcessorId      Local APIC ID of the hot-added processor.
+  @param[out] ProcessorNumber  The handle number of the hot-added processor.
+
+  @retval EFI_SUCCESS             The hot-addition of the specified processors was successfully notified.
+  @retval EFI_UNSUPPORTED         Hot addition of processor is not supported.
+  @retval EFI_NOT_FOUND           The processor with the handle specified by ProcessorNumber does not exist.
+  @retval EFI_INVALID_PARAMETER   ProcessorNumber is invalid.
+  @retval EFI_ALREADY_STARTED     The processor is already online in the system.
+**/
+EFI_STATUS
+EFIAPI
+SmmAddProcessor (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  IN UINT64 ProcessorId,
+  OUT UINTN *ProcessorNumber
+  )
+{
+  UINTN  Slot;
+
+  //
+  // Hot plug must be enabled for this service to do anything.
+  //
+  if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Reject a NULL output pointer and the reserved invalid APIC ID.
+  //
+  if (ProcessorId == INVALID_APIC_ID || ProcessorNumber == NULL) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // A processor that is already tracked cannot be added again.
+  //
+  for (Slot = 0; Slot < mMaxNumberOfCpus; Slot++) {
+    if (gSmmCpuPrivate->ProcessorInfo[Slot].ProcessorId == ProcessorId) {
+      return EFI_ALREADY_STARTED;
+    }
+  }
+
+  //
+  // Find the free slot whose APIC-ID-to-SMBASE mapping was prepared by the
+  // CPU RAS handler, claim it, and record the pending hot-add operation.
+  //
+  for (Slot = 0; Slot < mMaxNumberOfCpus; Slot++) {
+    if (gSmmCpuPrivate->ProcessorInfo[Slot].ProcessorId != INVALID_APIC_ID) {
+      continue;
+    }
+    if (mCpuHotPlugData.ApicId[Slot] != ProcessorId) {
+      continue;
+    }
+    gSmmCpuPrivate->ProcessorInfo[Slot].ProcessorId = ProcessorId;
+    gSmmCpuPrivate->ProcessorInfo[Slot].StatusFlag  = 0;
+    SmmGetProcessorLocation ((UINT32)ProcessorId, &gSmmCpuPrivate->ProcessorInfo[Slot].Location);
+
+    *ProcessorNumber = Slot;
+    gSmmCpuPrivate->Operation[Slot] = SmmCpuAdd;
+    return EFI_SUCCESS;
+  }
+
+  return EFI_INVALID_PARAMETER;
+}
+\r
+/**
+  Notify that a processor was hot-removed.
+
+  @param[in] This             A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber  The handle number of the processor to remove.
+
+  @retval EFI_SUCCESS             The hot-removal of the specified processors was successfully notified.
+  @retval EFI_UNSUPPORTED         Hot removal of processor is not supported.
+  @retval EFI_UNSUPPORTED         Hot removal of BSP is not supported.
+  @retval EFI_UNSUPPORTED         Hot removal of a processor with pending hot-plug operation is not supported.
+  @retval EFI_INVALID_PARAMETER   ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  IN UINTN ProcessorNumber
+  )
+{
+  if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // The handle must be in range and refer to a present processor.
+  //
+  if (ProcessorNumber >= mMaxNumberOfCpus) {
+    return EFI_INVALID_PARAMETER;
+  }
+  if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // The processor currently running the SMM core (the BSP) cannot be removed.
+  //
+  if (gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu == ProcessorNumber) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // A processor with another hot-plug operation pending cannot be removed.
+  //
+  if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Invalidate the APIC ID records for the slot; removal from the CPU list
+  // stays pending until all SMI handlers are finished.
+  //
+  gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId = INVALID_APIC_ID;
+  mCpuHotPlugData.ApicId[ProcessorNumber]                    = INVALID_APIC_ID;
+
+  gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuRemove;
+  return EFI_SUCCESS;
+}
+\r
+/**
+  This returns the handle number for the calling processor.
+
+  @param[in]  This             A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[out] ProcessorNumber  The handle number of currently executing processor.
+
+  @retval EFI_SUCCESS             The current processor handle number was returned
+                                  in ProcessorNumber.
+  @retval EFI_INVALID_PARAMETER   ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  OUT UINTN *ProcessorNumber
+  )
+{
+  UINT64  ApicId;
+  UINTN   CpuIndex;
+
+  if (ProcessorNumber == NULL) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // Match the current APIC ID against the tracked processors.
+  //
+  ApicId = GetApicId ();
+  for (CpuIndex = 0; CpuIndex < mMaxNumberOfCpus; CpuIndex++) {
+    if (ApicId == gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId) {
+      *ProcessorNumber = CpuIndex;
+      return EFI_SUCCESS;
+    }
+  }
+
+  //
+  // The calling processor must always be present in the list.
+  //
+  ASSERT (FALSE);
+  return EFI_NOT_FOUND;
+}
+\r
+/**
+  Update the SMM CPU list per the pending operation.
+
+  This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+  VOID
+  )
+{
+  UINTN Index;
+
+  //
+  // Each processor slot carries at most one pending operation, and the
+  // per-slot work is independent, so all three operation kinds (BSP switch,
+  // hot-add, hot-remove) can be resolved in a single pass instead of three
+  // separate scans of the CPU list.
+  //
+  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+    switch (gSmmCpuPrivate->Operation[Index]) {
+    case SmmCpuSwitchBsp:
+      //
+      // Elect this processor as a BSP candidate for the next SMI.
+      //
+      mSmmMpSyncData->SwitchBsp = TRUE;
+      mSmmMpSyncData->CandidateBsp[Index] = TRUE;
+      break;
+    case SmmCpuAdd:
+      mNumberOfCpus++;
+      break;
+    case SmmCpuRemove:
+      mNumberOfCpus--;
+      break;
+    default:
+      //
+      // Nothing pending for this slot.
+      //
+      continue;
+    }
+    gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+  }
+}
+\r
+/**
+  Register exception handler.
+
+  @param  This                  A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+  @param  ExceptionType         Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+                                the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+                                of the UEFI 2.0 specification.
+  @param  InterruptHandler      A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+                                that is called when a processor interrupt occurs.
+                                If this parameter is NULL, then the handler will be uninstalled.
+
+  @retval EFI_SUCCESS           The handler for the processor interrupt was successfully installed or uninstalled.
+  @retval EFI_ALREADY_STARTED   InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+  @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+  @retval EFI_UNSUPPORTED       The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+    IN EFI_SMM_CPU_SERVICE_PROTOCOL  *This,
+    IN EFI_EXCEPTION_TYPE            ExceptionType,
+    IN EFI_CPU_INTERRUPT_HANDLER     InterruptHandler
+    )
+{
+  EFI_STATUS  Status;
+
+  //
+  // Delegate registration to the CPU exception handler library.
+  //
+  Status = RegisterCpuInterruptHandler (ExceptionType, InterruptHandler);
+  return Status;
+}
+\r
+/**
+  Initialize SMM CPU Services.
+
+  It installs EFI SMM CPU Services Protocol.
+
+  @param Handle  The handle on which to install the EFI SMM CPU Services Protocol.
+
+  @retval EFI_SUCCESS    EFI SMM CPU Services Protocol was installed successfully.
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+  IN EFI_HANDLE Handle
+  )
+{
+  EFI_STATUS Status;
+
+  Status = gSmst->SmmInstallProtocolInterface (
+                    &Handle,
+                    &gEfiSmmCpuServiceProtocolGuid,
+                    EFI_NATIVE_INTERFACE,
+                    &mSmmCpuService
+                    );
+  ASSERT_EFI_ERROR (Status);
+  return Status;
+}
+\r
--- /dev/null
+/** @file\r
+Include file for SMM CPU Services protocol implementation.\r
+\r
+Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _CPU_SERVICE_H_\r
+#define _CPU_SERVICE_H_\r
+\r
+//
+// Pending hot-plug / BSP-switch operation recorded per processor slot by the
+// SMM CPU Service protocol functions and resolved after all SMI handlers
+// have returned.
+//
+typedef enum {
+  SmmCpuNone,        // no operation pending for this processor slot
+  SmmCpuAdd,         // processor was hot-added; add it to the CPU list
+  SmmCpuRemove,      // processor was hot-removed; drop it from the CPU list
+  SmmCpuSwitchBsp    // processor should become the BSP at the next SMI
+} SMM_CPU_OPERATION;
+\r
+//\r
+// SMM CPU Service Protocol function prototypes.\r
+//\r
+\r
+/**\r
+ Gets processor information on the requested processor at the instant this call is made.\r
+\r
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.\r
+ @param[in] ProcessorNumber The handle number of processor.\r
+ @param[out] ProcessorInfoBuffer A pointer to the buffer where information for\r
+ the requested processor is deposited.\r
+\r
+ @retval EFI_SUCCESS Processor information was returned.\r
+ @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL.\r
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.\r
+ @retval EFI_NOT_FOUND The processor with the handle specified by\r
+ ProcessorNumber does not exist in the platform.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmGetProcessorInfo (\r
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ IN UINTN ProcessorNumber,\r
+ OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer\r
+ );\r
+\r
+/**\r
+ This service switches the requested AP to be the BSP since the next SMI.\r
+\r
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.\r
+ @param[in] ProcessorNumber The handle number of AP that is to become the new BSP.\r
+\r
+ @retval EFI_SUCCESS BSP will be switched in next SMI.\r
+ @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported.\r
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.\r
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmSwitchBsp (\r
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ IN UINTN ProcessorNumber\r
+ );\r
+\r
+/**\r
+ Notify that a processor was hot-added.\r
+\r
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.\r
+ @param[in] ProcessorId Local APIC ID of the hot-added processor.\r
+ @param[out] ProcessorNumber The handle number of the hot-added processor.\r
+\r
+ @retval EFI_SUCCESS The hot-addition of the specified processors was successfully notified.\r
+ @retval EFI_UNSUPPORTED Hot addition of processor is not supported.\r
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.\r
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.\r
+ @retval EFI_ALREADY_STARTED The processor is already online in the system.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmAddProcessor (\r
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ IN UINT64 ProcessorId,\r
+ OUT UINTN *ProcessorNumber\r
+ );\r
+\r
+/**\r
+ Notify that a processor was hot-removed.\r
+\r
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.\r
+ @param[in] ProcessorNumber The handle number of the processor to be hot-removed.
+\r
+ @retval EFI_SUCCESS The hot-removal of the specified processors was successfully notified.\r
+ @retval EFI_UNSUPPORTED Hot removal of processor is not supported.\r
+ @retval EFI_UNSUPPORTED Hot removal of BSP is not supported.\r
+ @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported.\r
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmRemoveProcessor (\r
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ IN UINTN ProcessorNumber\r
+ );\r
+\r
+/**\r
+ This returns the handle number for the calling processor.
+\r
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.\r
+ @param[out] ProcessorNumber The handle number of currently executing processor.\r
+\r
+ @retval EFI_SUCCESS The current processor handle number was returned\r
+ in ProcessorNumber.\r
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmWhoAmI (\r
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ OUT UINTN *ProcessorNumber\r
+ );\r
+\r
+/**\r
+ Register exception handler.\r
+\r
+ @param This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and\r
+ the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL\r
+ of the UEFI 2.0 specification.\r
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER\r
+ that is called when a processor interrupt occurs.\r
+ If this parameter is NULL, then the handler will be uninstalled.\r
+\r
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.\r
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed.\r
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.\r
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmRegisterExceptionHandler (\r
+ IN EFI_SMM_CPU_SERVICE_PROTOCOL *This,\r
+ IN EFI_EXCEPTION_TYPE ExceptionType,\r
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler\r
+ );\r
+\r
+//\r
+// Internal function prototypes\r
+//\r
+\r
+/**\r
+ Update the SMM CPU list per the pending operation.\r
+\r
+ This function is called after return from SMI handlers.\r
+**/\r
+VOID\r
+SmmCpuUpdate (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Initialize SMM CPU Services.\r
+\r
+ It installs EFI SMM CPU Services Protocol.\r
+\r
+ @param Handle The firmware allocated handle for the EFI image.
+\r
+ @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.\r
+**/\r
+EFI_STATUS\r
+InitializeSmmCpuServices (\r
+ IN EFI_HANDLE Handle\r
+ );\r
+\r
+#endif\r
--- /dev/null
+/** @file\r
+SMM MP service implementation\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+//\r
+// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
+//\r
+UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];\r
+UINT64 gPhyMask;\r
+SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
+UINTN mSmmMpSyncDataSize;\r
+\r
+/**
+  Performs an atomic compare exchange operation to get semaphore.
+  The compare exchange operation must be performed using
+  MP safe mechanisms.
+
+  @param Sem        IN:  32-bit unsigned integer
+                    OUT: original integer - 1
+  @return Original integer - 1
+
+**/
+UINT32
+WaitForSemaphore (
+  IN OUT  volatile UINT32           *Sem
+  )
+{
+  UINT32  Sample;
+
+  //
+  // Spin until the semaphore is non-zero and this CPU wins the race to
+  // decrement it atomically.
+  //
+  for ( ; ; ) {
+    Sample = *Sem;
+    if (Sample != 0 &&
+        InterlockedCompareExchange32 ((UINT32 *)Sem, Sample, Sample - 1) == Sample) {
+      break;
+    }
+  }
+  return Sample - 1;
+}
+\r
+\r
+/**
+  Performs an atomic compare exchange operation to release semaphore.
+  The compare exchange operation must be performed using
+  MP safe mechanisms.
+
+  @param Sem        IN:  32-bit unsigned integer
+                    OUT: original integer + 1
+  @return Original integer + 1
+
+**/
+UINT32
+ReleaseSemaphore (
+  IN OUT  volatile UINT32           *Sem
+  )
+{
+  UINT32  Sample;
+
+  for ( ; ; ) {
+    Sample = *Sem;
+    if (Sample + 1 == 0) {
+      //
+      // Semaphore is saturated at the maximum 32-bit value (it has been
+      // locked down); leave it unmodified and report wrap-around (0).
+      //
+      break;
+    }
+    if (InterlockedCompareExchange32 ((UINT32 *)Sem, Sample, Sample + 1) == Sample) {
+      break;
+    }
+  }
+  return Sample + 1;
+}
+\r
+/**
+  Performs an atomic compare exchange operation to lock semaphore.
+  The compare exchange operation must be performed using
+  MP safe mechanisms.
+
+  @param Sem        IN:  32-bit unsigned integer
+                    OUT: -1
+  @return Original integer
+
+**/
+UINT32
+LockdownSemaphore (
+  IN OUT  volatile UINT32           *Sem
+  )
+{
+  UINT32  Sample;
+
+  //
+  // Retry until the current value is atomically replaced with -1,
+  // preventing any further check-ins.
+  //
+  do {
+    Sample = *Sem;
+  } while (InterlockedCompareExchange32 ((UINT32 *)Sem, Sample, (UINT32)-1) != Sample);
+
+  return Sample;
+}
+\r
+/**
+  Wait for all APs to perform an atomic compare exchange operation that
+  releases the BSP's Run semaphore.
+
+  @param NumberOfAPs AP number
+
+**/
+VOID
+WaitForAllAPs (
+  IN      UINTN                     NumberOfAPs
+  )
+{
+  UINTN  BspIndex;
+  UINTN  Arrived;
+
+  BspIndex = mSmmMpSyncData->BspIndex;
+  //
+  // Consume one Run-semaphore signal per expected AP.
+  //
+  for (Arrived = 0; Arrived < NumberOfAPs; Arrived++) {
+    WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+  }
+}
+\r
+/**
+  Performs an atomic compare exchange operation to release semaphore
+  for each AP.
+
+**/
+VOID
+ReleaseAllAPs (
+  VOID
+  )
+{
+  UINTN  CpuIndex;
+  UINTN  BspIndex;
+
+  BspIndex = mSmmMpSyncData->BspIndex;
+  //
+  // Signal the Run semaphore of every present AP (every present CPU that is
+  // not the BSP).
+  //
+  for (CpuIndex = 0; CpuIndex < mMaxNumberOfCpus; CpuIndex++) {
+    if (CpuIndex != BspIndex && mSmmMpSyncData->CpuData[CpuIndex].Present) {
+      ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+    }
+  }
+}
+\r
+/**
+  Checks if all CPUs (with certain exceptions) have checked in for this SMI run
+
+  @param Exceptions CPU Arrival exception flags.
+
+  @retval TRUE  if all CPUs have checked in.
+  @retval FALSE if at least one Normal AP hasn't checked in.
+
+**/
+BOOLEAN
+AllCpusInSmmWithExceptions (
+  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
+  )
+{
+  UINTN                      CpuIndex;
+  SMM_CPU_DATA_BLOCK         *CpuData;
+  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;
+
+  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+  //
+  // Fast path: the arrival counter already accounts for every CPU.
+  //
+  if (mSmmMpSyncData->Counter == mNumberOfCpus) {
+    return TRUE;
+  }
+
+  CpuData       = mSmmMpSyncData->CpuData;
+  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
+  for (CpuIndex = 0; CpuIndex < mMaxNumberOfCpus; CpuIndex++) {
+    //
+    // Skip CPUs that have arrived and slots with no valid processor.
+    //
+    if (CpuData[CpuIndex].Present || ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
+      continue;
+    }
+    //
+    // This CPU has not arrived; tolerate it only when it is in one of the
+    // excepted states the caller asked us to ignore.
+    //
+    if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegSmmDelayed) != 0) {
+      continue;
+    }
+    if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegSmmBlocked) != 0) {
+      continue;
+    }
+    if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegSmmEnable) != 0) {
+      continue;
+    }
+    return FALSE;
+  }
+
+  return TRUE;
+}
+\r
+\r
+/**
+ Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
+ entering SMM, except SMI disabled APs.
+
+**/
+VOID
+SmmWaitForApArrival (
+ VOID
+ )
+{
+ UINT64 Timer;
+ UINTN Index;
+
+ ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ //
+ // Platform implementor should choose a timeout value appropriately:
+ // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
+ // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
+ // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
+ // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
+ // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
+ // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
+ // - The timeout value must be longer than longest possible IO operation in the system
+ //
+
+ //
+ // Sync with APs 1st timeout: poll until the timer expires or every CPU that
+ // is not blocked / SMI-disabled has checked in.
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+
+ //
+ // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
+ // because:
+ // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
+ // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
+ // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
+ // work while SMI handling is on-going.
+ // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
+ // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
+ // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
+ // mode work while SMI handling is on-going.
+ // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
+ // - In traditional flow, SMI disabling is discouraged.
+ // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
+ // In both cases, adding SMI-disabling checking code increases overhead.
+ //
+ if (mSmmMpSyncData->Counter < mNumberOfCpus) {
+ //
+ // Send SMI IPIs to bring outside processors in
+ //
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
+ }
+ }
+
+ //
+ // Sync with APs 2nd timeout.
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+ }
+
+ return;
+}
+\r
+\r
+/**
+  Replace OS MTRR's with SMI MTRR's.
+
+  @param CpuIndex Processor Index
+
+**/
+VOID
+ReplaceOSMtrrs (
+  IN      UINTN                     CpuIndex
+  )
+{
+  PROCESSOR_SMM_DESCRIPTOR  *Psd;
+  UINT64                    *SmiMtrrs;
+
+  //
+  // Locate the SMI MTRR save area through this CPU's PSD in its SMBASE region.
+  //
+  Psd      = (PROCESSOR_SMM_DESCRIPTOR *)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
+  SmiMtrrs = (UINT64 *)(UINTN)Psd->MtrrBaseMaskPtr;
+
+  SmmCpuFeaturesDisableSmrr ();
+
+  //
+  // Replace all MTRRs registers
+  //
+  MtrrSetAllMtrrs ((MTRR_SETTINGS *)SmiMtrrs);
+}
+\r
+/**
+ SMI handler for BSP.
+
+ Coordinates AP rendezvous, optional MTRR reprogramming, invocation of the
+ SMM Foundation entry point, and the synchronized SMI exit sequence.
+
+ @param CpuIndex BSP processor Index
+ @param SyncMode SMM MP sync mode
+
+**/
+VOID
+BSPHandler (
+ IN UINTN CpuIndex,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINTN Index;
+ MTRR_SETTINGS Mtrrs;
+ UINTN ApCount;
+ BOOLEAN ClearTopLevelSmiResult;
+ UINTN PresentCount;
+
+ ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
+ ApCount = 0;
+
+ //
+ // Flag BSP's presence
+ //
+ mSmmMpSyncData->InsideSmm = TRUE;
+
+ //
+ // Initialize Debug Agent to start source level debug in BSP handler
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
+
+ //
+ // Mark this processor's presence
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+
+ //
+ // Clear platform top level SMI status bit before calling SMI handlers. If
+ // we cleared it after SMI handlers are run, we would miss the SMI that
+ // occurs after SMI handlers are done and before SMI status bit is cleared.
+ //
+ ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
+ ASSERT (ClearTopLevelSmiResult == TRUE);
+
+ //
+ // Set running processor index
+ //
+ gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
+
+ //
+ // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
+ //
+ if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
+
+ //
+ // Wait for APs to arrive
+ //
+ SmmWaitForApArrival();
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+
+ //
+ // Wait for all APs to get ready for programming MTRRs
+ //
+ WaitForAllAPs (ApCount);
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Signal all APs it's time for backup MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
+ // We do the backup first and then set MTRR to avoid race condition for threads
+ // in the same core.
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete their MTRR saving
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Let all processors program SMM MTRRs together
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Wait for all APs to complete their MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+ }
+
+ //
+ // The BUSY lock is initialized to Acquired state
+ //
+ AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+
+ //
+ // Restore SMM Configuration in S3 boot path.
+ //
+ if (mRestoreSmmConfigurationInS3) {
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ ConfigSmmCodeAccessCheck ();
+ mRestoreSmmConfigurationInS3 = FALSE;
+ }
+
+ //
+ // Invoke SMM Foundation EntryPoint with the processor information context.
+ //
+ gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
+
+ //
+ // Make sure all APs have completed their pending non-blocking tasks
+ //
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
+ AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
+ }
+ }
+
+ //
+ // Perform the remaining tasks
+ //
+ PerformRemainingTasks ();
+
+ //
+ // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
+ // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
+ // will run through freely.
+ //
+ if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+ //
+ // Make sure all APs have their Present flag set
+ //
+ while (TRUE) {
+ PresentCount = 0;
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (mSmmMpSyncData->CpuData[Index].Present) {
+ PresentCount ++;
+ }
+ }
+ if (PresentCount > ApCount) {
+ break;
+ }
+ }
+ }
+
+ //
+ // Notify all APs to exit
+ //
+ mSmmMpSyncData->InsideSmm = FALSE;
+ ReleaseAllAPs ();
+
+ //
+ // Wait for all APs to complete their pending tasks
+ //
+ WaitForAllAPs (ApCount);
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Signal APs to restore MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Restore OS MTRRs
+ //
+ SmmCpuFeaturesReenableSmrr ();
+ MtrrSetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+
+ //
+ // Stop source level debug in BSP handler, the code below will not be
+ // debugged.
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
+
+ //
+ // Signal APs to Reset states/semaphore for this processor
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Perform pending operations for hot-plug
+ //
+ SmmCpuUpdate ();
+
+ //
+ // Clear the Present flag of BSP
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+
+ //
+ // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
+ // WaitForAllAps does not depend on the Present flag.
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Reset BspIndex to -1, meaning BSP has not been elected.
+ //
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ mSmmMpSyncData->BspIndex = (UINT32)-1;
+ }
+
+ //
+ // Allow APs to check in from this point on
+ //
+ mSmmMpSyncData->Counter = 0;
+ mSmmMpSyncData->AllCpusInSync = FALSE;
+}
+\r
+/**
+ SMI handler for AP.
+
+ @param CpuIndex AP processor Index.
+ @param ValidSmi Indicates that current SMI is a valid SMI or not.
+ (Not referenced by this implementation; kept for interface
+ symmetry with the dispatcher.)
+ @param SyncMode SMM MP sync mode.
+
+**/
+VOID
+APHandler (
+ IN UINTN CpuIndex,
+ IN BOOLEAN ValidSmi,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINT64 Timer;
+ UINTN BspIndex;
+ MTRR_SETTINGS Mtrrs;
+
+ //
+ // Timeout BSP: wait for the BSP to flag InsideSmm before the sync timer expires.
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !mSmmMpSyncData->InsideSmm;
+ ) {
+ CpuPause ();
+ }
+
+ if (!mSmmMpSyncData->InsideSmm) {
+ //
+ // BSP timeout in the first round
+ //
+ if (mSmmMpSyncData->BspIndex != -1) {
+ //
+ // BSP Index is known
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Send SMI IPI to bring BSP in
+ //
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
+
+ //
+ // Now clock BSP for the 2nd time
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !mSmmMpSyncData->InsideSmm;
+ ) {
+ CpuPause ();
+ }
+
+ if (!mSmmMpSyncData->InsideSmm) {
+ //
+ // Give up since BSP is unable to enter SMM
+ // and signal the completion of this AP
+ // (undo this AP's check-in on the arrival counter)
+ //
+ WaitForSemaphore (&mSmmMpSyncData->Counter);
+ return;
+ }
+ } else {
+ //
+ // Don't know BSP index. Give up without sending IPI to BSP.
+ //
+ WaitForSemaphore (&mSmmMpSyncData->Counter);
+ return;
+ }
+ }
+
+ //
+ // BSP is available
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Mark this processor's presence
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+
+ if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Notify BSP of arrival at this point
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Wait for the signal from BSP to backup MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Backup OS MTRRs
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for BSP's signal to program MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Replace OS MTRRs with SMI MTRRs
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ //
+ // Service loop: run procedures scheduled by SmmStartupThisAp() until the
+ // BSP clears InsideSmm to request SMI exit.
+ //
+ while (TRUE) {
+ //
+ // Wait for something to happen
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Check if BSP wants to exit SMM
+ //
+ if (!mSmmMpSyncData->InsideSmm) {
+ break;
+ }
+
+ //
+ // BUSY should be acquired by SmmStartupThisAp()
+ //
+ ASSERT (
+ !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
+ );
+
+ //
+ // Invoke the scheduled procedure
+ //
+ (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
+ (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
+ );
+
+ //
+ // Release BUSY
+ //
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Notify BSP the readiness of this AP to program MTRRs
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to program MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Restore OS MTRRs
+ //
+ SmmCpuFeaturesReenableSmrr ();
+ MtrrSetAllMtrrs(&Mtrrs);
+ }
+
+ //
+ // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to Reset states/semaphore for this processor
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Reset states/semaphore for this processor
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+
+ //
+ // Notify BSP the readiness of this AP to exit SMM
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+}
+\r
+/**
+ Create 4G PageTable in SMRAM.
+
+ Builds an identity-mapped PAE-style page table covering the low 4GB using
+ 2MB pages. When PcdCpuSmmStackGuard is set, the 2MB regions covering the
+ SMM stack area are broken into 4KB pages so each stack's guard page can be
+ marked non-present.
+
+ @param ExtraPages Additional page numbers besides for 4G memory
+ @return PageTable Address
+
+**/
+UINT32
+Gen4GPageTable (
+ IN UINTN ExtraPages
+ )
+{
+ VOID *PageTable;
+ UINTN Index;
+ UINT64 *Pte;
+ UINTN PagesNeeded;
+ UINTN Low2MBoundary;
+ UINTN High2MBoundary;
+ UINTN Pages;
+ UINTN GuardPage;
+ UINT64 *Pdpte;
+ UINTN PageIndex;
+ UINTN PageAddress;
+
+ Low2MBoundary = 0;
+ High2MBoundary = 0;
+ PagesNeeded = 0;
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Add one more page for known good stack, then find the lower 2MB aligned address.
+ //
+ Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
+ //
+ // Add two more pages for known good stack and stack guard page,
+ // then find the lower 2MB aligned address.
+ //
+ High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
+ //
+ // One 4KB page-table page is needed for each 2MB region being split.
+ //
+ PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
+ }
+ //
+ // Allocate the page table: ExtraPages for the caller, 5 pages for the
+ // 4G map (1 page holding the 4 PDPTEs + 4 pages of page directories),
+ // plus PagesNeeded page-table pages for stack-guard splitting.
+ //
+ PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);
+ ASSERT (PageTable != NULL);
+
+ PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
+ Pte = (UINT64*)PageTable;
+
+ //
+ // Zero out all page table entries first
+ // (only the first page, which holds the PDPTEs, needs clearing)
+ //
+ ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
+
+ //
+ // Set Page Directory Pointers
+ //
+ for (Index = 0; Index < 4; Index++) {
+ Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;
+ }
+ Pte += EFI_PAGE_SIZE / sizeof (*Pte);
+
+ //
+ // Fill in Page Directory Entries
+ // (4 pages x 512 entries of identity-mapped 2MB pages => 4GB)
+ //
+ for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
+ Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
+ GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
+ Pdpte = (UINT64*)PageTable;
+ for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
+ //
+ // Re-point this 2MB region's directory entry at a 4KB page table.
+ //
+ Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;
+ //
+ // Fill in Page Table Entries
+ //
+ Pte = (UINT64*)Pages;
+ PageAddress = PageIndex;
+ for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
+ if (PageAddress == GuardPage) {
+ //
+ // Mark the guard page as non-present (IA32_PG_P deliberately omitted)
+ //
+ Pte[Index] = PageAddress;
+ GuardPage += mSmmStackSize;
+ if (GuardPage > mSmmStackArrayEnd) {
+ GuardPage = 0;
+ }
+ } else {
+ Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;
+ }
+ PageAddress+= EFI_PAGE_SIZE;
+ }
+ Pages += EFI_PAGE_SIZE;
+ }
+ }
+
+ return (UINT32)(UINTN)PageTable;
+}
+\r
+/**
+ Set memory cache ability.
+
+ Walks the page table down to the 4KB entry covering Address, splitting a
+ 2MB page into 4KB pages on the way if necessary, then rewrites that
+ entry's cacheability bits.
+
+ @param PageTable PageTable Address
+ @param Address Memory Address to change cache ability
+ @param Cacheability Cache ability to set
+
+**/
+VOID
+SetCacheability (
+ IN UINT64 *PageTable,
+ IN UINTN Address,
+ IN UINT8 Cacheability
+ )
+{
+ UINTN PTIndex;
+ VOID *NewPageTableAddress;
+ UINT64 *NewPageTable;
+ UINTN Index;
+
+ ASSERT ((Address & EFI_PAGE_MASK) == 0);
+
+ //
+ // On a 64-bit build, descend through the extra PML4 level first.
+ //
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+ }
+
+ PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+
+ //
+ // A perfect implementation should check the original cacheability with the
+ // one being set, and break a 2M page entry into pieces only when they
+ // disagreed.
+ //
+ PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {\r
+ //
+ // Allocate a page from SMRAM
+ //
+ NewPageTableAddress = AllocatePages (1);
+ ASSERT (NewPageTableAddress != NULL);
+
+ NewPageTable = (UINT64 *)NewPageTableAddress;
+
+ //
+ // Split the 2MB page into 512 4KB pages, copying its attributes into
+ // each entry; the PAT flag is cleared at its 2M-entry bit position and
+ // set at the 4K-entry bit position, and the per-entry frame address is
+ // filled in.
+ //
+ for (Index = 0; Index < 0x200; Index++) {
+ NewPageTable[Index] = PageTable[PTIndex];
+ if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
+ NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
+ NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
+ }
+ NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
+ }
+
+ PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;
+ }
+
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+
+ //
+ // Clear the old cacheability bits of the 4KB entry and apply the new ones.
+ //
+ PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
+ PageTable[PTIndex] |= (UINT64)Cacheability;
+}
+\r
+\r
+/**
+  Schedule a procedure to run on the specified CPU.
+
+  @param Procedure The address of the procedure to run
+  @param CpuIndex Target CPU Index
+  @param ProcArguments The parameter to pass to the procedure
+
+  @retval EFI_INVALID_PARAMETER CpuNumber not valid
+  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
+  @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+  IN      EFI_AP_PROCEDURE          Procedure,
+  IN      UINTN                     CpuIndex,
+  IN OUT  VOID                      *ProcArguments OPTIONAL
+  )
+{
+  //
+  // Reject an out-of-range index, the currently executing (BSP) CPU, an AP
+  // that is not present in this SMI run, or one pending hot-removal.
+  //
+  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
+    return EFI_INVALID_PARAMETER;
+  }
+  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+    return EFI_INVALID_PARAMETER;
+  }
+  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
+    return EFI_INVALID_PARAMETER;
+  }
+  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // Fail if the target AP is still busy with a previously scheduled procedure.
+  //
+  if (!AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // Hand the procedure to the AP and wake it via its Run semaphore.
+  //
+  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
+  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
+  ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
+    //
+    // Optionally block until the AP releases BUSY, i.e. the procedure is done.
+    //
+    AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+  }
+  return EFI_SUCCESS;
+}
+\r
+/**
+ C function for SMI entry, each processor comes here upon SMI trigger.
+
+ Elects a BSP (if enabled), then dispatches this processor to BSPHandler()
+ or APHandler() accordingly.
+
+ @param CpuIndex CPU Index
+
+**/
+VOID
+EFIAPI
+SmiRendezvous (
+ IN UINTN CpuIndex
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN ValidSmi;
+ BOOLEAN IsBsp;
+ BOOLEAN BspInProgress;
+ UINTN Index;
+ UINTN Cr2;
+
+ //
+ // Save Cr2 because Page Fault exception in SMM may override its value
+ //
+ Cr2 = AsmReadCr2 ();
+
+ //
+ // Perform CPU specific entry hooks
+ //
+ SmmCpuFeaturesRendezvousEntry (CpuIndex);
+
+ //
+ // Determine if this is a valid SMI
+ //
+ ValidSmi = PlatformValidSmi();
+
+ //
+ // Determine if BSP has been already in progress. Note this must be checked after
+ // ValidSmi because BSP may clear a valid SMI source after checking in.
+ //
+ BspInProgress = mSmmMpSyncData->InsideSmm;
+
+ if (!BspInProgress && !ValidSmi) {
+ //
+ // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
+ // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
+ // status had been cleared by BSP and an existing SMI run has almost ended. (Note
+ // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
+ // is nothing we need to do.
+ //
+ goto Exit;
+ } else {
+ //
+ // Signal presence of this processor. A return value of 0 means the
+ // arrival counter was locked down (set to MAX_UINT32) by the BSP,
+ // so this late arrival must not join the current SMI run.
+ //
+ if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
+ //
+ // BSP has already ended the synchronization, so QUIT!!!
+ //
+
+ //
+ // Wait for BSP's signal to finish SMI
+ //
+ while (mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ goto Exit;
+ } else {
+
+ //
+ // The BUSY lock is initialized to Released state.
+ // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
+ // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
+ // after AP's present flag is detected.
+ //
+ InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ //
+ // Try to enable NX
+ //
+ if (mXdSupported) {
+ ActivateXd ();
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ ActivateSmmProfile (CpuIndex);
+ }
+
+ if (BspInProgress) {
+ //
+ // BSP has been elected. Follow AP path, regardless of ValidSmi flag
+ // as BSP may have cleared the SMI status
+ //
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ } else {
+ //
+ // We have a valid SMI
+ //
+
+ //
+ // Elect BSP
+ //
+ IsBsp = FALSE;
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
+ //
+ // Call platform hook to do BSP election
+ //
+ Status = PlatformSmmBspElection (&IsBsp);
+ if (EFI_SUCCESS == Status) {
+ //
+ // Platform hook determines successfully
+ //
+ if (IsBsp) {
+ mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
+ }
+ } else {
+ //
+ // Platform hook fails to determine, use default BSP election method:
+ // the first CPU to swap BspIndex from -1 to its own index wins.
+ //
+ InterlockedCompareExchange32 (
+ (UINT32*)&mSmmMpSyncData->BspIndex,
+ (UINT32)-1,
+ (UINT32)CpuIndex
+ );
+ }
+ }
+ }
+
+ //
+ // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
+ //
+ if (mSmmMpSyncData->BspIndex == CpuIndex) {
+
+ //
+ // Clear last request for SwitchBsp.
+ //
+ if (mSmmMpSyncData->SwitchBsp) {
+ mSmmMpSyncData->SwitchBsp = FALSE;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mSmmMpSyncData->CandidateBsp[Index] = FALSE;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileRecordSmiNum ();
+ }
+
+ //
+ // BSP Handler is always called with a ValidSmi == TRUE
+ //
+ BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
+
+ } else {
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ }
+ }
+
+ //
+ // This CPU's Run semaphore must be fully consumed by now.
+ //
+ ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
+
+ //
+ // Wait for BSP's signal to exit SMI
+ //
+ while (mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ }
+
+Exit:
+ SmmCpuFeaturesRendezvousExit (CpuIndex);
+ //
+ // Restore Cr2
+ //
+ AsmWriteCr2 (Cr2);
+}
+\r
+\r
+/**\r
+ Initialize un-cacheable data.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+InitializeMpSyncData (\r
+ VOID\r
+ )\r
+{\r
+ if (mSmmMpSyncData != NULL) {\r
+ ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
+ mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
+ mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
+ //\r
+ // Enable BSP election by setting BspIndex to -1\r
+ //\r
+ mSmmMpSyncData->BspIndex = (UINT32)-1;\r
+ }\r
+ mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
+ }\r
+}\r
+\r
+/**\r
+ Initialize global data for MP synchronization.\r
+\r
+ @param Stacks Base address of SMI stack buffer for all processors.\r
+ @param StackSize Stack size for each processor in SMM.\r
+\r
+**/\r
+UINT32\r
+InitializeMpServiceData (\r
+ IN VOID *Stacks,\r
+ IN UINTN StackSize\r
+ )\r
+{\r
+ UINT32 Cr3;\r
+ UINTN Index;\r
+ MTRR_SETTINGS *Mtrr;\r
+ PROCESSOR_SMM_DESCRIPTOR *Psd;\r
+ UINTN GdtTssTableSize;\r
+ UINT8 *GdtTssTables;\r
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
+ UINTN TssBase;\r
+ UINTN GdtTableStepSize;\r
+\r
+ //\r
+ // Initialize physical address mask\r
+ // NOTE: Physical memory above virtual address limit is not supported !!!\r
+ //\r
+ AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
+ gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
+ gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
+\r
+ //\r
+ // Create page tables\r
+ //\r
+ Cr3 = SmmInitPageTable ();\r
+\r
+ GdtTssTables = NULL;\r
+ GdtTssTableSize = 0;\r
+ GdtTableStepSize = 0;\r
+ //\r
+ // For X64 SMM, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
+ // on each SMI entry.\r
+ //\r
+ if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {\r
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // Setup top of known good stack as IST1 for each processor.\r
+ //\r
+ *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);\r
+ }\r
+ }\r
+ } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+\r
+ //\r
+ // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.\r
+ // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
+ // on each SMI entry.\r
+ //\r
+\r
+ //\r
+ // Enlarge GDT to contain 2 TSS descriptors\r
+ //\r
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));\r
+\r
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);\r
+ //\r
+ // Fixup TSS descriptors\r
+ //\r
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ TssBase += TSS_SIZE;\r
+ GdtDescriptor++;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+ //\r
+ // Fixup TSS segments\r
+ //\r
+ // ESP as known good stack\r
+ //\r
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;\r
+ }\r
+ }\r
+\r
+ //\r
+ // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
+ //\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
+ Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
+ CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
+ if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) {\r
+ //\r
+ // For X64 SMM, set GDT to the copy allocated above.\r
+ //\r
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
+ } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.\r
+ //\r
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
+ Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
+ }\r
+\r
+ //\r
+ // Install SMI handler\r
+ //\r
+ InstallSmiHandler (\r
+ Index,\r
+ (UINT32)mCpuHotPlugData.SmBase[Index],\r
+ (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
+ StackSize,\r
+ (UINTN)Psd->SmmGdtPtr,\r
+ Psd->SmmGdtSize,\r
+ gcSmiIdtr.Base,\r
+ gcSmiIdtr.Limit + 1,\r
+ Cr3\r
+ );\r
+ }\r
+\r
+ //\r
+ // Initialize mSmmMpSyncData\r
+ //\r
+ mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
+ (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
+ ASSERT (mSmmMpSyncData != NULL);\r
+ InitializeMpSyncData ();\r
+\r
+ //\r
+ // Record current MTRR settings\r
+ //\r
+ ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));\r
+ Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;\r
+ MtrrGetAllMtrrs (Mtrr);\r
+\r
+ return Cr3;\r
+}\r
+\r
+/**\r
+\r
+ Register the SMM Foundation entry point.\r
+\r
+ @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
+ @param SmmEntryPoint SMM Foundation EntryPoint\r
+\r
+ @retval EFI_SUCCESS Successfully to register SMM foundation entry point\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+RegisterSmmEntry (\r
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
+ )\r
+{\r
+ //\r
+ // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
+ //\r
+ gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
+ return EFI_SUCCESS;\r
+}\r
--- /dev/null
+/** @file\r
+Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+//\r
+// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
+// along its supporting fields.\r
+//\r
+SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
+ SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
+ NULL, // SmmCpuHandle\r
+ NULL, // Pointer to ProcessorInfo array\r
+ NULL, // Pointer to Operation array\r
+ NULL, // Pointer to CpuSaveStateSize array\r
+ NULL, // Pointer to CpuSaveState array\r
+ { {0} }, // SmmReservedSmramRegion\r
+ {\r
+ SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
+ 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
+ 0, // SmmCoreEntryContext.NumberOfCpus\r
+ NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
+ NULL // SmmCoreEntryContext.CpuSaveState\r
+ },\r
+ NULL, // SmmCoreEntry\r
+ {\r
+ mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
+ RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
+ },\r
+};\r
+\r
//
// CPU hot-plug data shared with the SMM feature library; the APIC ID and
// SMBASE array pointers are filled in during driver initialization.
//
CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};
+\r
+//\r
+// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
+//\r
+SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
+\r
+//\r
+// SMM Relocation variables\r
+//\r
+volatile BOOLEAN *mRebased;\r
+volatile BOOLEAN mIsBsp;\r
+\r
+///\r
+/// Handle for the SMM CPU Protocol\r
+///\r
+EFI_HANDLE mSmmCpuHandle = NULL;\r
+\r
+///\r
+/// SMM CPU Protocol instance\r
+///\r
+EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
+ SmmReadSaveState,\r
+ SmmWriteSaveState\r
+};\r
+\r
+EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
+\r
+///\r
+/// SMM CPU Save State Protocol instance\r
+///\r
+EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = {\r
+ NULL\r
+};\r
+\r
+//\r
+// SMM stack information\r
+//\r
+UINTN mSmmStackArrayBase;\r
+UINTN mSmmStackArrayEnd;\r
+UINTN mSmmStackSize;\r
+\r
+//\r
+// Pointer to structure used during S3 Resume\r
+//\r
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
+\r
+UINTN mMaxNumberOfCpus = 1;\r
+UINTN mNumberOfCpus = 1;\r
+\r
+//\r
+// SMM ready to lock flag\r
+//\r
+BOOLEAN mSmmReadyToLock = FALSE;\r
+\r
+//\r
+// Global used to cache PCD for SMM Code Access Check enable\r
+//\r
+BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
+\r
+//\r
+// Spin lock used to serialize setting of SMM Code Access Check feature\r
+//\r
+SPIN_LOCK mConfigSmmCodeAccessCheckLock;\r
+\r
+/**\r
+ Initialize IDT to setup exception handlers for SMM.\r
+\r
+**/\r
+VOID\r
+InitializeSmmIdt (\r
+ VOID\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ BOOLEAN InterruptState;\r
+ IA32_DESCRIPTOR DxeIdtr;\r
+ //\r
+ // Disable Interrupt and save DXE IDT table\r
+ //\r
+ InterruptState = SaveAndDisableInterrupts ();\r
+ AsmReadIdtr (&DxeIdtr);\r
+ //\r
+ // Load SMM temporary IDT table\r
+ //\r
+ AsmWriteIdtr (&gcSmiIdtr);\r
+ //\r
+ // Setup SMM default exception handlers, SMM IDT table\r
+ // will be updated and saved in gcSmiIdtr\r
+ //\r
+ Status = InitializeCpuExceptionHandlers (NULL);\r
+ ASSERT_EFI_ERROR (Status);\r
+ //\r
+ // Restore DXE IDT table and CPU interrupt\r
+ //\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
+ SetInterruptState (InterruptState);\r
+}\r
+\r
+/**\r
+ Search module name by input IP address and output it.\r
+\r
+ @param CallerIpAddress Caller instruction pointer.\r
+\r
+**/\r
+VOID\r
+DumpModuleInfoByIp (\r
+ IN UINTN CallerIpAddress\r
+ )\r
+{\r
+ UINTN Pe32Data;\r
+ EFI_IMAGE_DOS_HEADER *DosHdr;\r
+ EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
+ VOID *PdbPointer;\r
+ UINT64 DumpIpAddress;\r
+\r
+ //\r
+ // Find Image Base\r
+ //\r
+ Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
+ while (Pe32Data != 0) {\r
+ DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
+ if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
+ //\r
+ // DOS image header is present, so read the PE header after the DOS image header.\r
+ //\r
+ Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
+ //\r
+ // Make sure PE header address does not overflow and is less than the initial address.\r
+ //\r
+ if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
+ if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
+ //\r
+ // It's PE image.\r
+ //\r
+ break;\r
+ }\r
+ }\r
+ }\r
+\r
+ //\r
+ // Not found the image base, check the previous aligned address\r
+ //\r
+ Pe32Data -= SIZE_4KB;\r
+ }\r
+\r
+ DumpIpAddress = CallerIpAddress;\r
+ DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
+\r
+ if (Pe32Data != 0) {\r
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
+ if (PdbPointer != NULL) {\r
+ DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Read information from the CPU save state.\r
+\r
+ @param This EFI_SMM_CPU_PROTOCOL instance\r
+ @param Width The number of bytes to read from the CPU save state.\r
+ @param Register Specifies the CPU register to read form the save state.\r
+ @param CpuIndex Specifies the zero-based index of the CPU save state.\r
+ @param Buffer Upon return, this holds the CPU register value read from the save state.\r
+\r
+ @retval EFI_SUCCESS The register was read from Save State\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
+ @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmReadSaveState (\r
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
+ IN UINTN Width,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN CpuIndex,\r
+ OUT VOID *Buffer\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+\r
+ //\r
+ // Retrieve pointer to the specified CPU's SMM Save State buffer\r
+ //\r
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
+ //\r
+ // The pseudo-register only supports the 64-bit size specified by Width.\r
+ //\r
+ if (Width != sizeof (UINT64)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+ //\r
+ // If the processor is in SMM at the time the SMI occurred,\r
+ // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
+ // Otherwise, EFI_NOT_FOUND is returned.\r
+ //\r
+ if (mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
+ *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
+ return EFI_SUCCESS;\r
+ } else {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+ }\r
+\r
+ if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
+ if (Status == EFI_UNSUPPORTED) {\r
+ Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
+ }\r
+ return Status;\r
+}\r
+\r
+/**\r
+ Write data to the CPU save state.\r
+\r
+ @param This EFI_SMM_CPU_PROTOCOL instance\r
+ @param Width The number of bytes to read from the CPU save state.\r
+ @param Register Specifies the CPU register to write to the save state.\r
+ @param CpuIndex Specifies the zero-based index of the CPU save state\r
+ @param Buffer Upon entry, this holds the new CPU register value.\r
+\r
+ @retval EFI_SUCCESS The register was written from Save State\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
+ @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmWriteSaveState (\r
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
+ IN UINTN Width,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN CpuIndex,\r
+ IN CONST VOID *Buffer\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+\r
+ //\r
+ // Retrieve pointer to the specified CPU's SMM Save State buffer\r
+ //\r
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
+ if (Status == EFI_UNSUPPORTED) {\r
+ Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
+ }\r
+ return Status;\r
+}\r
+\r
+\r
+/**\r
+ C function for SMI handler. To change all processor's SMMBase Register.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+SmmInitHandler (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 ApicId;\r
+ UINTN Index;\r
+\r
+ //\r
+ // Update SMM IDT entries' code segment and load IDT\r
+ //\r
+ AsmWriteIdtr (&gcSmiIdtr);\r
+ ApicId = GetApicId ();\r
+\r
+ ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+\r
+ for (Index = 0; Index < mNumberOfCpus; Index++) {\r
+ if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
+ //\r
+ // Initialize SMM specific features on the currently executing CPU\r
+ //\r
+ SmmCpuFeaturesInitializeProcessor (\r
+ Index,\r
+ mIsBsp,\r
+ gSmmCpuPrivate->ProcessorInfo,\r
+ &mCpuHotPlugData\r
+ );\r
+\r
+ if (mIsBsp) {\r
+ //\r
+ // BSP rebase is already done above.\r
+ // Initialize private data during S3 resume\r
+ //\r
+ InitializeMpSyncData ();\r
+ }\r
+\r
+ //\r
+ // Hook return after RSM to set SMM re-based flag\r
+ //\r
+ SemaphoreHook (Index, &mRebased[Index]);\r
+\r
+ return;\r
+ }\r
+ }\r
+ ASSERT (FALSE);\r
+}\r
+\r
+/**\r
+ Relocate SmmBases for each processor.\r
+\r
+ Execute on first boot and all S3 resumes\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+SmmRelocateBases (\r
+ VOID\r
+ )\r
+{\r
+ UINT8 BakBuf[BACK_BUF_SIZE];\r
+ SMRAM_SAVE_STATE_MAP BakBuf2;\r
+ SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
+ UINT8 *U8Ptr;\r
+ UINT32 ApicId;\r
+ UINTN Index;\r
+ UINTN BspIndex;\r
+\r
+ //\r
+ // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
+ //\r
+ ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
+\r
+ //\r
+ // Patch ASM code template with current CR0, CR3, and CR4 values\r
+ //\r
+ gSmmCr0 = (UINT32)AsmReadCr0 ();\r
+ gSmmCr3 = (UINT32)AsmReadCr3 ();\r
+ gSmmCr4 = (UINT32)AsmReadCr4 ();\r
+\r
+ //\r
+ // Patch GDTR for SMM base relocation\r
+ //\r
+ gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
+ gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
+\r
+ U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
+ CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
+\r
+ //\r
+ // Backup original contents at address 0x38000\r
+ //\r
+ CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
+ CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
+\r
+ //\r
+ // Load image for relocation\r
+ //\r
+ CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
+\r
+ //\r
+ // Retrieve the local APIC ID of current processor\r
+ //\r
+ ApicId = GetApicId ();\r
+\r
+ //\r
+ // Relocate SM bases for all APs\r
+ // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
+ //\r
+ mIsBsp = FALSE;\r
+ BspIndex = (UINTN)-1;\r
+ for (Index = 0; Index < mNumberOfCpus; Index++) {\r
+ mRebased[Index] = FALSE;\r
+ if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
+ //\r
+ // Wait for this AP to finish its 1st SMI\r
+ //\r
+ while (!mRebased[Index]);\r
+ } else {\r
+ //\r
+ // BSP will be Relocated later\r
+ //\r
+ BspIndex = Index;\r
+ }\r
+ }\r
+\r
+ //\r
+ // Relocate BSP's SMM base\r
+ //\r
+ ASSERT (BspIndex != (UINTN)-1);\r
+ mIsBsp = TRUE;\r
+ SendSmiIpi (ApicId);\r
+ //\r
+ // Wait for the BSP to finish its 1st SMI\r
+ //\r
+ while (!mRebased[BspIndex]);\r
+\r
+ //\r
+ // Restore contents at address 0x38000\r
+ //\r
+ CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
+ CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
+}\r
+\r
+/**\r
+ Perform SMM initialization for all processors in the S3 boot path.\r
+\r
+ For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmmRestoreCpu (\r
+ VOID\r
+ )\r
+{\r
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
+ IA32_DESCRIPTOR Ia32Idtr;\r
+ IA32_DESCRIPTOR X64Idtr;\r
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
+ EFI_STATUS Status;\r
+\r
+ DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
+\r
+ //\r
+ // See if there is enough context to resume PEI Phase\r
+ //\r
+ if (mSmmS3ResumeState == NULL) {\r
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ SmmS3ResumeState = mSmmS3ResumeState;\r
+ ASSERT (SmmS3ResumeState != NULL);\r
+\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
+ //\r
+ // Save the IA32 IDT Descriptor\r
+ //\r
+ AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+\r
+ //\r
+ // Setup X64 IDT table\r
+ //\r
+ ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
+ X64Idtr.Base = (UINTN) IdtEntryTable;\r
+ X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
+\r
+ //\r
+ // Setup the default exception handler\r
+ //\r
+ Status = InitializeCpuExceptionHandlers (NULL);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Initialize Debug Agent to support source level debug\r
+ //\r
+ InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
+ }\r
+\r
+ //\r
+ // Do below CPU things for native platform only\r
+ //\r
+ if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {\r
+ //\r
+ // Skip initialization if mAcpiCpuData is not valid\r
+ //\r
+ if (mAcpiCpuData.NumberOfCpus > 0) {\r
+ //\r
+ // First time microcode load and restore MTRRs\r
+ //\r
+ EarlyInitializeCpu ();\r
+ }\r
+ }\r
+\r
+ //\r
+ // Restore SMBASE for BSP and all APs\r
+ //\r
+ SmmRelocateBases ();\r
+\r
+ //\r
+ // Do below CPU things for native platform only\r
+ //\r
+ if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {\r
+ //\r
+ // Skip initialization if mAcpiCpuData is not valid\r
+ //\r
+ if (mAcpiCpuData.NumberOfCpus > 0) {\r
+ //\r
+ // Restore MSRs for BSP and all APs\r
+ //\r
+ InitializeCpu ();\r
+ }\r
+ }\r
+\r
+ //\r
+ // Set a flag to restore SMM configuration in S3 path.\r
+ //\r
+ mRestoreSmmConfigurationInS3 = TRUE;\r
+\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
+\r
+ //\r
+ // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
+ //\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
+ DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
+\r
+ SwitchStack (\r
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
+ );\r
+ }\r
+\r
+ //\r
+ // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
+ //\r
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
+ DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
+ //\r
+ // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r
+ //\r
+ SaveAndSetDebugTimerInterrupt (FALSE);\r
+ //\r
+ // Restore IA32 IDT table\r
+ //\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
+ AsmDisablePaging64 (\r
+ SmmS3ResumeState->ReturnCs,\r
+ (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
+ (UINT32)SmmS3ResumeState->ReturnContext1,\r
+ (UINT32)SmmS3ResumeState->ReturnContext2,\r
+ (UINT32)SmmS3ResumeState->ReturnStackPointer\r
+ );\r
+ }\r
+\r
+ //\r
+ // Can not resume PEI Phase\r
+ //\r
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
+ CpuDeadLoop ();\r
+}\r
+\r
+/**\r
+ Copy register table from ACPI NVS memory into SMRAM.\r
+\r
+ @param[in] DestinationRegisterTableList Points to destination register table.\r
+ @param[in] SourceRegisterTableList Points to source register table.\r
+ @param[in] NumberOfCpus Number of CPUs.\r
+\r
+**/\r
+VOID\r
+CopyRegisterTable (\r
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
+ IN UINT32 NumberOfCpus\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN Index1;\r
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
+\r
+ CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ for (Index = 0; Index < NumberOfCpus; Index++) {\r
+ DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
+ ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
+ CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
+ //\r
+ // Go though all MSRs in register table to initialize MSR spin lock\r
+ //\r
+ RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
+ for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
+ if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
+ //\r
+ // Initialize MSR spin lock only for those MSRs need bit field writing\r
+ //\r
+ InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ SMM Ready To Lock event notification handler.\r
+\r
+ The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
+ perform additional lock actions that must be performed from SMM on the next SMI.\r
+\r
+ @param[in] Protocol Points to the protocol's unique identifier.\r
+ @param[in] Interface Points to the interface instance.\r
+ @param[in] Handle The handle on which the interface was installed.\r
+\r
+ @retval EFI_SUCCESS Notification handler runs successfully.\r
+ **/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmReadyToLockEventNotify (\r
+ IN CONST EFI_GUID *Protocol,\r
+ IN VOID *Interface,\r
+ IN EFI_HANDLE Handle\r
+ )\r
+{\r
+ ACPI_CPU_DATA *AcpiCpuData;\r
+ IA32_DESCRIPTOR *Gdtr;\r
+ IA32_DESCRIPTOR *Idtr;\r
+\r
+ //\r
+ // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0\r
+ //\r
+ mAcpiCpuData.NumberOfCpus = 0;\r
+\r
+ //\r
+ // If FrameworkCompatibilitySspport is enabled, then do not copy CPU S3 Data into SMRAM\r
+ //\r
+ if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {\r
+ goto Done;\r
+ }\r
+\r
+ //\r
+ // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
+ //\r
+ AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
+ if (AcpiCpuData == 0) {\r
+ goto Done;\r
+ }\r
+\r
+ //\r
+ // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
+ //\r
+ CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
+\r
+ mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
+ ASSERT (mAcpiCpuData.MtrrTable != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
+\r
+ mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
+ ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+\r
+ mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
+ ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
+\r
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
+\r
+ mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+\r
+ mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
+ ASSERT (mAcpiCpuData.RegisterTable != 0);\r
+\r
+ CopyRegisterTable (\r
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
+ mAcpiCpuData.NumberOfCpus\r
+ );\r
+\r
+ //\r
+ // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
+ //\r
+ Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
+ Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
+\r
+ mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
+ ASSERT (mGdtForAp != NULL);\r
+ mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
+ mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
+\r
+ CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
+ CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
+ CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
+\r
+Done:\r
+ //\r
+ // Set SMM ready to lock flag and return\r
+ //\r
+ mSmmReadyToLock = TRUE;\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**
+ The module Entry Point of the CPU SMM driver.
+
+ Sets up the per-CPU tiled SMRAM layout (SMI entry code + save state per CPU),
+ relocates SMBASE on every processor, initializes MP synchronization data and
+ paging, produces the SMM Configuration and SMM CPU protocols, and prepares
+ the S3 resume state structure when an ACPI variable HOB is present.
+
+ @param ImageHandle The firmware allocated handle for the EFI image.
+ @param SystemTable A pointer to the EFI System Table.
+
+ @retval EFI_SUCCESS The entry point is executed successfully.
+ @retval Other Some error occurs when executing this entry point.
+
+**/
+EFI_STATUS
+EFIAPI
+PiCpuSmmEntry (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ EFI_STATUS Status;
+ EFI_MP_SERVICES_PROTOCOL *MpServices;
+ UINTN NumberOfEnabledProcessors;
+ UINTN Index;
+ VOID *Buffer;
+ UINTN TileSize;
+ VOID *GuidHob;
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ UINT8 *Stacks;
+ VOID *Registration;
+ UINT32 RegEax;
+ UINT32 RegEdx;
+ UINTN FamilyId;
+ UINTN ModelId;
+ UINT32 Cr3;
+
+ //
+ // Initialize Debug Agent to support source level debug in SMM code
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
+
+ //
+ // Report the start of CPU SMM initialization.
+ //
+ REPORT_STATUS_CODE (
+ EFI_PROGRESS_CODE,
+ EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
+ );
+
+ //
+ // Fix segment address of the long-mode-switch jump: only needed when this
+ // driver is built for X64, where the far jump must target the long mode
+ // code segment selector.
+ //
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
+ }
+
+ //
+ // Find out SMRR Base and SMRR Size
+ //
+ FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
+
+ //
+ // Get MP Services Protocol
+ //
+ Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
+ //
+ Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
+ ASSERT_EFI_ERROR (Status);
+ ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
+
+ //
+ // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
+ // A constant BSP index makes no sense because it may be hot removed.
+ //
+ DEBUG_CODE (
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+
+ ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
+ }
+ );
+
+ //
+ // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
+ //
+ mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
+ DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
+
+ //
+ // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
+ } else {
+ mMaxNumberOfCpus = mNumberOfCpus;
+ }
+ gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
+
+ //
+ // The CPU save state and code for the SMI entry point are tiled within an SMRAM
+ // allocated buffer. The minimum size of this buffer for a uniprocessor system
+ // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
+ // just below SMBASE + 64KB. If more than one CPU is present in the platform,
+ // then the SMI entry point and the CPU save state areas can be tiled to minimize
+ // the total amount SMRAM required for all the CPUs. The tile size can be computed
+ // by adding the CPU save state size, any extra CPU specific context, and
+ // the size of code that must be placed at the SMI entry point to transfer
+ // control to a C function in the native SMM execution mode. This size is
+ // rounded up to the nearest power of 2 to give the tile size for each CPU.
+ // The total amount of memory required is the maximum number of CPUs that
+ // platform supports times the tile size. The picture below shows the tiling,
+ // where m is the number of tiles that fit in 32KB.
+ //
+ // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
+ // | CPU m+1 Save State |
+ // +-----------------------------+
+ // | CPU m+1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 64 KB
+ // | CPU m-1 Save State |
+ // +-----------------------------+
+ // | CPU m-1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | CPU 2 Save State |
+ // +-----------------------------+
+ // | CPU 2 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m+1 SMI Entry |
+ // +=============================+ <-- Base of allocated buffer + 32 KB
+ // | CPU 1 Save State |
+ // +-----------------------------+
+ // | CPU 1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
+ // | CPU 0 Save State |
+ // +-----------------------------+
+ // | CPU 0 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 0 SMI Entry |
+ // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
+ //
+
+ //
+ // Retrieve CPU Family and Model from CPUID leaf 1: family in EAX[11:8],
+ // model in EAX[7:4], with the extended model bits EAX[19:16] folded in
+ // for families 0x06 and 0x0F per the CPUID definition.
+ //
+ AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);
+ FamilyId = (RegEax >> 8) & 0xf;
+ ModelId = (RegEax >> 4) & 0xf;
+ if (FamilyId == 0x06 || FamilyId == 0x0f) {
+ ModelId = ModelId | ((RegEax >> 12) & 0xf0);
+ }
+
+ //
+ // Determine the mode of the CPU at the time an SMI occurs
+ // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ // Volume 3C, Section 34.4.1.1
+ //
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
+ if ((RegEdx & BIT29) != 0) {
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
+ }
+ //
+ // NOTE(review): these specific family-6 models are forced to the 64-bit
+ // save state layout regardless of the CPUID Intel 64 indication above --
+ // confirm the model list against the SDM / processor specification updates.
+ //
+ if (FamilyId == 0x06) {
+ if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
+ }
+ }
+
+ //
+ // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
+ // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size
+ // is rounded up to nearest power of 2.
+ //
+ TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1;
+ TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
+ DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize));
+
+ //
+ // If the TileSize is larger than space available for the SMI Handler of CPU[i],
+ // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1] and the SMRAM Save State Map of CPU[i+1],
+ // then ASSERT(). If this ASSERT() is triggered, then the SMI Handler size must be
+ // reduced.
+ //
+ ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
+
+ //
+ // Allocate buffer for all of the tiles.
+ //
+ // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ // Volume 3C, Section 34.11 SMBASE Relocation
+ // For Pentium and Intel486 processors, the SMBASE values must be
+ // aligned on a 32-KByte boundary or the processor will enter shutdown
+ // state during the execution of a RSM instruction.
+ //
+ // Intel486 processors: FamilyId is 4
+ // Pentium processors : FamilyId is 5
+ //
+ if ((FamilyId == 4) || (FamilyId == 5)) {
+ Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB);
+ } else {
+ Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));
+ }
+ ASSERT (Buffer != NULL);
+
+ //
+ // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
+ //
+ gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
+
+ gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->Operation != NULL);
+
+ gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
+
+ gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
+
+ mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
+ mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
+ mSmmCpuSaveState.CpuSaveState = (EFI_SMM_CPU_STATE **)gSmmCpuPrivate->CpuSaveState;
+
+ //
+ // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
+ //
+ mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.ApicId != NULL);
+ mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.SmBase != NULL);
+ mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
+
+ //
+ // Retrieve APIC ID of each enabled processor from the MP Services protocol.
+ // Also compute the SMBASE address, CPU Save State address, and CPU Save state
+ // size for each CPU in the platform
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ //
+ // SMBASE is chosen so that SMBASE + SMM_HANDLER_OFFSET (the SMI entry
+ // point) lands at the start of this CPU's tile in the allocated buffer.
+ //
+ mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
+ gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
+ gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+
+ if (Index < mNumberOfCpus) {
+ Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
+ ASSERT_EFI_ERROR (Status);
+ mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
+
+ DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
+ Index,
+ (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
+ mCpuHotPlugData.SmBase[Index],
+ gSmmCpuPrivate->CpuSaveState[Index],
+ gSmmCpuPrivate->CpuSaveStateSize[Index]
+ ));
+ } else {
+ //
+ // Slots beyond the currently-present CPUs are reserved for hot-add and
+ // marked with INVALID_APIC_ID until a processor occupies them.
+ //
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
+ mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
+ }
+ }
+
+ //
+ // Allocate SMI stacks for all processors.
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // 2 more pages is allocated for each processor.
+ // one is guard page and the other is known good stack.
+ //
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | | | |
+ // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
+ //
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
+ Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
+ ASSERT (Stacks != NULL);
+ mSmmStackArrayBase = (UINTN)Stacks;
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
+ } else {
+ mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
+ Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
+ ASSERT (Stacks != NULL);
+ }
+
+ //
+ // Set SMI stack for SMM base relocation
+ //
+ gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
+
+ //
+ // Initialize IDT
+ //
+ InitializeSmmIdt ();
+
+ //
+ // Relocate SMM Base addresses to the ones allocated from SMRAM
+ //
+ mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
+ ASSERT (mRebased != NULL);
+ SmmRelocateBases ();
+
+ //
+ // Call hook for BSP to perform extra actions in normal mode after all
+ // SMM base addresses have been relocated on all CPUs
+ //
+ SmmCpuFeaturesSmmRelocationComplete ();
+
+ //
+ // SMM Time initialization
+ //
+ InitializeSmmTimer ();
+
+ //
+ // Initialize MP globals
+ //
+ Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
+
+ //
+ // Fill in SMM Reserved Regions
+ //
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
+
+ //
+ // Install the SMM Configuration Protocol onto a new handle on the handle database.
+ // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
+ // to an SMRAM address will be present in the handle database
+ //
+ Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
+ &gSmmCpuPrivate->SmmCpuHandle,
+ &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install the SMM CPU Protocol into SMM protocol database
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gEfiSmmCpuProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpu
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ PcdSet64 (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
+ }
+
+ //
+ // Initialize SMM CPU Services Support
+ //
+ Status = InitializeSmmCpuServices (mSmmCpuHandle);
+ ASSERT_EFI_ERROR (Status);
+
+ if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
+ //
+ // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility
+ //
+ Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
+ &gSmmCpuPrivate->SmmCpuHandle,
+ &gEfiSmmCpuSaveStateProtocolGuid,
+ &mSmmCpuSaveState,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+ //
+ // The SmmStartupThisAp service in Framework SMST should always be non-null.
+ // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk
+ // can have it ready when constructing Framework SMST.
+ //
+ gSmst->SmmStartupThisAp = SmmStartupThisAp;
+ }
+
+ //
+ // register SMM Ready To Lock Protocol notification
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmReadyToLockProtocolGuid,
+ SmmReadyToLockEventNotify,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // If an ACPI variable HOB exists, prepare the SMM S3 resume state structure
+ // at the start of the SMRAM region it describes; S3 resume code will jump
+ // to SmmRestoreCpu() through this structure.
+ //
+ GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
+ if (GuidHob != NULL) {
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
+
+ DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
+ DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));
+
+ SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
+ ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));
+
+ mSmmS3ResumeState = SmmS3ResumeState;
+ SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;
+
+ SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;
+
+ SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
+ SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
+ //
+ // If the S3 stack could not be allocated, record a zero size so the
+ // resume path will not use an invalid stack base.
+ //
+ if (SmmS3ResumeState->SmmS3StackBase == 0) {
+ SmmS3ResumeState->SmmS3StackSize = 0;
+ }
+
+ SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
+ SmmS3ResumeState->SmmS3Cr3 = Cr3;
+ SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
+ }
+ if (sizeof (UINTN) == sizeof (UINT32)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
+ }
+ }
+
+ //
+ // Check XD and BTS features
+ //
+ CheckProcessorFeature ();
+
+ //
+ // Initialize SMM Profile feature
+ //
+ InitSmmProfile (Cr3);
+
+ //
+ // Patch SmmS3ResumeState->SmmS3Cr3
+ //
+ InitSmmS3Cr3 ();
+
+ DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
+
+ return EFI_SUCCESS;
+}
+\r
+/**
+ Find out SMRAM information including SMRR base and SMRR size.
+
+ The largest available SMRAM range between 1MB and 4GB that is at least
+ 256KB - 4KB in size is selected, then adjacent SMRAM descriptors are merged
+ into it so that the reported SMRR region covers the whole contiguous block.
+
+ @param SmrrBase Output for the SMRR base address.
+ @param SmrrSize Output for the SMRR size in bytes.
+
+**/
+VOID
+FindSmramInfo (
+ OUT UINT32 *SmrrBase,
+ OUT UINT32 *SmrrSize
+ )
+{
+ EFI_STATUS Status;
+ UINTN Size;
+ EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
+ EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
+ EFI_SMRAM_DESCRIPTOR *SmramRanges;
+ UINTN SmramRangeCount;
+ UINTN Index;
+ UINT64 MaxSize;
+ BOOLEAN Found;
+
+ //
+ // Get SMM Access Protocol
+ //
+ Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Get SMRAM information: first call with Size == 0 retrieves the required
+ // buffer size, second call fills the descriptor array.
+ //
+ Size = 0;
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
+ ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+ SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
+ ASSERT (SmramRanges != NULL);
+
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);
+ ASSERT_EFI_ERROR (Status);
+
+ SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
+
+ //
+ // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
+ //
+ CurrentSmramRange = NULL;
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {
+ //
+ // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
+ //
+ if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
+ continue;
+ }
+
+ if (SmramRanges[Index].CpuStart >= BASE_1MB) {
+ if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {
+ if (SmramRanges[Index].PhysicalSize >= MaxSize) {
+ MaxSize = SmramRanges[Index].PhysicalSize;
+ CurrentSmramRange = &SmramRanges[Index];
+ }
+ }
+ }
+ }
+
+ ASSERT (CurrentSmramRange != NULL);
+
+ *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
+ *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
+
+ //
+ // Repeatedly merge descriptors that are physically adjacent to the selected
+ // range (either immediately below the base or immediately above the end)
+ // until no more can be absorbed.
+ //
+ do {
+ Found = FALSE;
+ for (Index = 0; Index < SmramRangeCount; Index++) {
+ if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {
+ *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;
+ *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {
+ *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ }
+ }
+ } while (Found);
+
+ //
+ // Free the temporary descriptor buffer: the original code leaked this
+ // AllocatePool() allocation. All needed information has already been
+ // copied into *SmrrBase / *SmrrSize.
+ //
+ FreePool (SmramRanges);
+
+ DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
+}
+\r
+/**
+Configure SMM Code Access Check feature on the executing processor.
+SMM Feature Control MSR will be locked after configuration.
+
+@param[in,out] Buffer Pointer to the zero-based CPU index (UINTN) of the
+ processor to configure.
+**/
+VOID
+EFIAPI
+ConfigSmmCodeAccessCheckOnCurrentProcessor (
+ IN OUT VOID *Buffer
+ )
+{
+ UINTN CpuIndex;
+ UINT64 CurrentValue;
+ UINT64 DesiredValue;
+
+ //
+ // The context passed in is the zero-based index of this CPU.
+ //
+ CpuIndex = *(UINTN *)Buffer;
+
+ //
+ // Read the current SMM Feature Control MSR value, then derive the desired
+ // value from the code-access-check setting and the MSR-lock policy PCD.
+ //
+ CurrentValue = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
+ DesiredValue = CurrentValue;
+ if (mSmmCodeAccessCheckEnable) {
+ DesiredValue |= SMM_CODE_CHK_EN_BIT;
+ }
+ if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ DesiredValue |= SMM_FEATURE_CONTROL_LOCK_BIT;
+ }
+
+ //
+ // Skip the MSR write entirely when no bits would change.
+ //
+ if (DesiredValue != CurrentValue) {
+ SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, DesiredValue);
+ }
+
+ //
+ // Signal completion to the dispatcher by releasing the spin lock used to
+ // serialize updates to the SMM Feature Control MSR.
+ //
+ ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);
+}
+\r
+/**
+Configure SMM Code Access Check feature for all processors.
+SMM Feature Control MSR will be locked after configuration.
+
+The BSP configures itself first, then dispatches each AP one at a time via
+SmmStartupThisAp(), using mConfigSmmCodeAccessCheckLock as a handshake: the
+BSP acquires the lock before dispatching and the running processor releases
+it when done.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+ VOID
+ )
+{
+ UINTN Index;
+ EFI_STATUS Status;
+
+ //
+ // Check to see if the Feature Control MSR is supported on this CPU
+ //
+ Index = gSmst->CurrentlyExecutingCpu;
+ if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
+ mSmmCodeAccessCheckEnable = FALSE;
+ return;
+ }
+
+ //
+ // Check to see if the CPU supports the SMM Code Access Check feature
+ // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
+ //
+ if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
+ mSmmCodeAccessCheckEnable = FALSE;
+ }
+
+ //
+ // If the SMM Code Access Check feature is disabled and the Feature Control MSR
+ // is not being locked, then no additional work is required
+ //
+ if (!mSmmCodeAccessCheckEnable && !FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ return;
+ }
+
+ //
+ // Initialize the lock used to serialize the MSR programming in BSP and all APs
+ //
+ InitializeSpinLock (&mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Acquire Config SMM Code Access Check spin lock. The BSP will release the
+ // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
+ //
+ AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Enable SMM Code Access Check feature on the BSP.
+ // At this point Index still holds the BSP's own CPU index.
+ //
+ ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
+
+ //
+ // Enable SMM Code Access Check feature for the APs.
+ //
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ if (Index != gSmst->CurrentlyExecutingCpu) {
+
+ //
+ // Acquire Config SMM Code Access Check spin lock. The AP will release the
+ // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
+ //
+ AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
+ // Passing &Index is safe: the BSP spins below until the AP releases the
+ // lock, so Index is not modified while the AP may still read it.
+ //
+ Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Wait for the AP to release the Config SMM Code Access Check spin lock.
+ //
+ while (!AcquireSpinLockOrFail (&mConfigSmmCodeAccessCheckLock)) {
+ CpuPause ();
+ }
+
+ //
+ // Release the Config SMM Code Access Check spin lock.
+ //
+ ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);
+ }
+ }
+}
+\r
+/**
+ Perform the remaining tasks once the SMM-ready-to-lock event has fired:
+ start the SMM Profile feature (when enabled), build the final page tables,
+ and configure the SMM Code Access Check feature.
+
+**/
+VOID
+PerformRemainingTasks (
+ VOID
+ )
+{
+ //
+ // Nothing to do until the ready-to-lock notification has set the flag.
+ //
+ if (!mSmmReadyToLock) {
+ return;
+ }
+
+ //
+ // Start SMM Profile feature
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileStart ();
+ }
+
+ //
+ // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
+ //
+ InitPaging ();
+
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ ConfigSmmCodeAccessCheck ();
+
+ //
+ // Clear the flag so this work is performed only once per lock event.
+ //
+ mSmmReadyToLock = FALSE;
+}
--- /dev/null
+/** @file\r
+Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _CPU_PISMMCPUDXESMM_H_\r
+#define _CPU_PISMMCPUDXESMM_H_\r
+\r
+#include <PiSmm.h>\r
+\r
+#include <Protocol/MpService.h>\r
+#include <Protocol/SmmConfiguration.h>\r
+#include <Protocol/SmmCpu.h>\r
+#include <Protocol/SmmAccess2.h>\r
+#include <Protocol/SmmCpuSaveState.h>\r
+#include <Protocol/SmmReadyToLock.h>\r
+#include <Protocol/SmmCpuService.h>\r
+\r
+#include <Guid/AcpiS3Context.h>\r
+\r
+#include <Library/BaseLib.h>\r
+#include <Library/IoLib.h>\r
+#include <Library/TimerLib.h>\r
+#include <Library/SmmLib.h>\r
+#include <Library/SynchronizationLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/PcdLib.h>\r
+#include <Library/CacheMaintenanceLib.h>\r
+#include <Library/MtrrLib.h>\r
+#include <Library/SmmCpuPlatformHookLib.h>\r
+#include <Library/SmmServicesTableLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/UefiBootServicesTableLib.h>\r
+#include <Library/UefiRuntimeServicesTableLib.h>\r
+#include <Library/DebugAgentLib.h>\r
+#include <Library/HobLib.h>\r
+#include <Library/LocalApicLib.h>\r
+#include <Library/UefiCpuLib.h>\r
+#include <Library/CpuExceptionHandlerLib.h>\r
+#include <Library/ReportStatusCodeLib.h>\r
+#include <Library/SmmCpuFeaturesLib.h>\r
+#include <Library/PeCoffGetEntryPointLib.h>\r
+\r
+#include <AcpiCpuData.h>\r
+#include <CpuHotPlugData.h>\r
+\r
+#include <Register/Cpuid.h>\r
+\r
+#include "CpuService.h"\r
+#include "SmmProfile.h"\r
+\r
+//\r
+// MSRs required for configuration of SMM Code Access Check\r
+//\r
+#define EFI_MSR_SMM_MCA_CAP 0x17D\r
+#define SMM_CODE_ACCESS_CHK_BIT BIT58\r
+\r
+#define SMM_FEATURE_CONTROL_LOCK_BIT BIT0\r
+#define SMM_CODE_CHK_EN_BIT BIT2\r
+\r
+///\r
+/// Page Table Entry\r
+///\r
+#define IA32_PG_P BIT0\r
+#define IA32_PG_RW BIT1\r
+#define IA32_PG_WT BIT3\r
+#define IA32_PG_CD BIT4\r
+#define IA32_PG_A BIT5\r
+#define IA32_PG_PS BIT7\r
+#define IA32_PG_PAT_2M BIT12\r
+#define IA32_PG_PAT_4K IA32_PG_PS\r
+#define IA32_PG_PMNT BIT62\r
+#define IA32_PG_NX BIT63\r
+\r
+//\r
+// Size of Task-State Segment defined in IA32 Manual\r
+//\r
+#define TSS_SIZE 104\r
+#define TSS_X64_IST1_OFFSET 36\r
+#define TSS_IA32_CR3_OFFSET 28\r
+#define TSS_IA32_ESP_OFFSET 56\r
+\r
+//\r
+// Code select value\r
+//\r
+#define PROTECT_MODE_CODE_SEGMENT 0x08\r
+#define LONG_MODE_CODE_SEGMENT 0x38\r
+\r
+//\r
+// The size 0x20 must be bigger than\r
+// the size of template code of SmmInit. Currently,\r
+// the size of SmmInit requires the 0x16 Bytes buffer\r
+// at least.\r
+//\r
+#define BACK_BUF_SIZE 0x20\r
+\r
+#define EXCEPTION_VECTOR_NUMBER 0x20\r
+\r
+#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL\r
+\r
+typedef UINT32 SMM_CPU_ARRIVAL_EXCEPTIONS;\r
+#define ARRIVAL_EXCEPTION_BLOCKED 0x1\r
+#define ARRIVAL_EXCEPTION_DELAYED 0x2\r
+#define ARRIVAL_EXCEPTION_SMI_DISABLED 0x4\r
+\r
+//\r
+// Private structure for the SMM CPU module that is stored in DXE Runtime memory\r
+// Contains the SMM Configuration Protocols that is produced.\r
+// Contains a mix of DXE and SMM contents. All the fields must be used properly.\r
+//\r
+#define SMM_CPU_PRIVATE_DATA_SIGNATURE SIGNATURE_32 ('s', 'c', 'p', 'u')\r
+\r
+typedef struct {
+ UINTN Signature; // SMM_CPU_PRIVATE_DATA_SIGNATURE
+
+ EFI_HANDLE SmmCpuHandle; // Handle on which the SMM Configuration Protocol is installed
+
+ EFI_PROCESSOR_INFORMATION *ProcessorInfo; // One entry per possible CPU, filled from the MP Services Protocol
+ SMM_CPU_OPERATION *Operation; // Per-CPU operation state (initialized to SmmCpuNone)
+ UINTN *CpuSaveStateSize; // Per-CPU save state size in bytes
+ VOID **CpuSaveState; // Per-CPU pointer to the save state area in SMRAM
+
+ EFI_SMM_RESERVED_SMRAM_REGION SmmReservedSmramRegion[1]; // Reserved SMRAM regions reported through SmmConfiguration
+ EFI_SMM_ENTRY_CONTEXT SmmCoreEntryContext; // Context passed to the SMM Core entry point
+ EFI_SMM_ENTRY_POINT SmmCoreEntry; // SMM Core entry point
+
+ EFI_SMM_CONFIGURATION_PROTOCOL SmmConfiguration; // Produced SMM Configuration Protocol instance (lives in SMRAM)
+} SMM_CPU_PRIVATE_DATA;
+\r
+extern SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate;\r
+extern CPU_HOT_PLUG_DATA mCpuHotPlugData;\r
+extern UINTN mMaxNumberOfCpus;\r
+extern UINTN mNumberOfCpus;\r
+extern BOOLEAN mRestoreSmmConfigurationInS3;\r
+extern EFI_SMM_CPU_PROTOCOL mSmmCpu;\r
+\r
+///\r
+/// The mode of the CPU at the time an SMI occurs\r
+///\r
+extern UINT8 mSmmSaveStateRegisterLma;\r
+\r
+\r
+//\r
+// SMM CPU Protocol function prototypes.\r
+//\r
+\r
+/**
+ Read information from the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to read from the CPU save state.
+ @param Register Specifies the CPU register to read from the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ OUT VOID *Buffer
+ );
+\r
+/**
+ Write data to the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to write to the CPU save state.
+ @param Register Specifies the CPU register to write to the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon entry, this holds the new CPU register value.
+
+ @retval EFI_SUCCESS The register was written to Save State
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+ @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ IN CONST VOID *Buffer
+ );
+\r
+/**
+Read a CPU Save State register on the target processor.
+
+This function abstracts the differences that whether the CPU Save State register is in the
+IA32 CPU Save State Map or X64 CPU Save State Map.
+
+This function supports reading a CPU Save State register in SMBase relocation handler.
+
+@param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+@param[in] Register Specifies the CPU register to read from the save state.
+@param[in] Width The number of bytes to read from the CPU save state.
+@param[out] Buffer Upon return, this holds the CPU register value read from the save state.
+
+@retval EFI_SUCCESS The register was read from Save State.
+@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+@retval EFI_INVALID_PARAMETER Buffer is NULL, or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+ReadSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ );
+\r
+/**
+Write value to a CPU Save State register on the target processor.
+
+This function abstracts the differences that whether the CPU Save State register is in the
+IA32 CPU Save State Map or X64 CPU Save State Map.
+
+This function supports writing a CPU Save State register in SMBase relocation handler.
+
+@param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+@param[in] Register Specifies the CPU register to write to the save state.
+@param[in] Width The number of bytes to write to the CPU save state.
+@param[in] Buffer Upon entry, this holds the new CPU register value.
+
+@retval EFI_SUCCESS The register was written to Save State.
+@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+@retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+WriteSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ IN CONST VOID *Buffer
+ );
+\r
+//
+// Far address (32-bit offset + 16-bit code segment selector) used to patch
+// the mode-switch far jump in the SMM init code; the Segment field is set to
+// LONG_MODE_CODE_SEGMENT on X64 builds (see PiCpuSmmEntry).
+//
+typedef struct {
+ UINT32 Offset;
+ UINT16 Segment;
+ UINT16 Reserved;
+} IA32_FAR_ADDRESS;
+
+extern IA32_FAR_ADDRESS gSmmJmpAddr;
+\r
+extern CONST UINT8 gcSmmInitTemplate[];\r
+extern CONST UINT16 gcSmmInitSize;\r
+extern UINT32 gSmmCr0;\r
+extern UINT32 gSmmCr3;\r
+extern UINT32 gSmmCr4;\r
+extern UINTN gSmmInitStack;\r
+\r
+/**\r
+ Semaphore operation for all processor relocate SMMBase.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmmRelocationSemaphoreComplete (\r
+ VOID\r
+ );\r
+\r
+///
+/// Per-CPU control block used to dispatch a procedure to a specific CPU
+/// while inside SMM (consumed by SmmStartupThisAp and friends in MpService.c).
+///
+typedef struct {
+ SPIN_LOCK Busy; ///< Taken while a procedure is scheduled/running on this CPU
+ volatile EFI_AP_PROCEDURE Procedure; ///< Procedure for the target CPU to execute
+ volatile VOID *Parameter; ///< Argument passed to Procedure
+ volatile UINT32 Run; ///< Run signal polled by the target CPU -- TODO confirm exact protocol in MpService.c
+ volatile BOOLEAN Present; ///< Presumably TRUE while this CPU is checked in to the current SMI -- confirm
+} SMM_CPU_DATA_BLOCK;
+\r
+///
+/// SMM AP synchronization policy. Tradition: every CPU rendezvous in SMM for
+/// each SMI; RelaxedAp: APs are not forced into SMM for each SMI.
+/// Presumably selected via PcdCpuSmmSyncMode (consumed by this module's INF);
+/// SmmCpuSyncModeMax is the exclusive upper bound used for validation.
+///
+typedef enum {
+ SmmCpuSyncModeTradition,
+ SmmCpuSyncModeRelaxedAp,
+ SmmCpuSyncModeMax
+} SMM_CPU_SYNC_MODE;
+\r
+///
+/// Shared data used by the BSP and APs to synchronize SMI entry and exit.
+///
+typedef struct {
+ //
+ // Pointer to an array. The array should be located immediately after this structure
+ // so that UC cache-ability can be set together.
+ //
+ SMM_CPU_DATA_BLOCK *CpuData;
+ volatile UINT32 Counter; ///< Rendezvous counter -- TODO confirm exact semantics in MpService.c
+ volatile UINT32 BspIndex; ///< Index of the CPU acting as BSP for this SMI
+ volatile BOOLEAN InsideSmm; ///< Presumably TRUE while an SMI is being processed -- confirm
+ volatile BOOLEAN AllCpusInSync; ///< TRUE once every CPU has entered SMM
+ volatile SMM_CPU_SYNC_MODE EffectiveSyncMode; ///< Sync mode in effect (see SMM_CPU_SYNC_MODE)
+ volatile BOOLEAN SwitchBsp; ///< Request to hand the BSP role to another CPU
+ volatile BOOLEAN *CandidateBsp; ///< Per-CPU flags marking BSP candidates
+} SMM_DISPATCHER_MP_SYNC_DATA;
+\r
+///
+/// Spin lock guarding accesses to the MSR identified by MsrIndex; entries of
+/// the mMsrSpinLocks table managed by InitMsrSpinLockByIndex.
+///
+typedef struct {
+ SPIN_LOCK SpinLock;
+ UINT32 MsrIndex;
+} MP_MSR_LOCK;
+\r
+//
+// Location of the Processor SMM Descriptor -- presumably an offset from each
+// CPU's SMBASE; TODO confirm against the SMI entry code that consumes gcPsd.
+//
+#define SMM_PSD_OFFSET 0xfb00
+
+///
+/// Processor SMM Descriptor (PSD): per-processor SMM execution environment
+/// description (segment selectors, GDT location/size, MTRR table pointer).
+/// Field offsets are fixed by layout; do not reorder or repack.
+///
+typedef struct {
+ UINT64 Signature; // Offset 0x00
+ UINT16 Reserved1; // Offset 0x08
+ UINT16 Reserved2; // Offset 0x0A
+ UINT16 Reserved3; // Offset 0x0C
+ UINT16 SmmCs; // Offset 0x0E
+ UINT16 SmmDs; // Offset 0x10
+ UINT16 SmmSs; // Offset 0x12
+ UINT16 SmmOtherSegment; // Offset 0x14
+ UINT16 Reserved4; // Offset 0x16
+ UINT64 Reserved5; // Offset 0x18
+ UINT64 Reserved6; // Offset 0x20
+ UINT64 Reserved7; // Offset 0x28
+ UINT64 SmmGdtPtr; // Offset 0x30
+ UINT32 SmmGdtSize; // Offset 0x38
+ UINT32 Reserved8; // Offset 0x3C
+ UINT64 Reserved9; // Offset 0x40
+ UINT64 Reserved10; // Offset 0x48
+ UINT16 Reserved11; // Offset 0x50
+ UINT16 Reserved12; // Offset 0x52
+ UINT32 Reserved13; // Offset 0x54
+ UINT64 MtrrBaseMaskPtr; // Offset 0x58
+} PROCESSOR_SMM_DESCRIPTOR;
+\r
+extern IA32_DESCRIPTOR gcSmiGdtr;\r
+extern IA32_DESCRIPTOR gcSmiIdtr;\r
+extern VOID *gcSmiIdtrPtr;\r
+extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd;\r
+extern UINT64 gPhyMask;\r
+extern ACPI_CPU_DATA mAcpiCpuData;\r
+extern SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData;\r
+extern VOID *mGdtForAp;\r
+extern VOID *mIdtForAp;\r
+extern VOID *mMachineCheckHandlerForAp;\r
+extern UINTN mSmmStackArrayBase;\r
+extern UINTN mSmmStackArrayEnd;\r
+extern UINTN mSmmStackSize;\r
+extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService;\r
+extern IA32_DESCRIPTOR gcSmiInitGdtr;\r
+\r
+/**\r
+ Create 4G PageTable in SMRAM.\r
+\r
+ @param ExtraPages Additional page numbers besides for 4G memory\r
+ @return PageTable Address\r
+\r
+**/\r
+UINT32\r
+Gen4GPageTable (\r
+ IN UINTN ExtraPages\r
+ );\r
+\r
+\r
+/**\r
+ Initialize global data for MP synchronization.\r
+\r
+ @param Stacks Base address of SMI stack buffer for all processors.\r
+ @param StackSize Stack size for each processor in SMM.\r
+\r
+**/\r
+UINT32\r
+InitializeMpServiceData (\r
+ IN VOID *Stacks,\r
+ IN UINTN StackSize\r
+ );\r
+\r
+/**\r
+ Initialize Timer for SMM AP Sync.\r
+\r
+**/\r
+VOID\r
+InitializeSmmTimer (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Start Timer for SMM AP Sync.\r
+\r
+**/\r
+UINT64\r
+EFIAPI\r
+StartSyncTimer (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Check if the SMM AP Sync timer is timeout.\r
+\r
+ @param Timer The start timer value returned by StartSyncTimer().\r
+\r
+**/\r
+BOOLEAN\r
+EFIAPI\r
+IsSyncTimerTimeout (\r
+ IN UINT64 Timer\r
+ );\r
+\r
+/**\r
+ Initialize IDT for SMM Stack Guard.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+InitializeIDTSmmStackGuard (\r
+ VOID\r
+ );\r
+\r
+/**\r
+\r
+ Register the SMM Foundation entry point.\r
+\r
+ @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
+ @param SmmEntryPoint SMM Foundation EntryPoint\r
+\r
+ @retval EFI_SUCCESS Successfully to register SMM foundation entry point\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+RegisterSmmEntry (\r
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
+ );\r
+\r
+/**\r
+ Create PageTable for SMM use.\r
+\r
+ @return PageTable Address\r
+\r
+**/\r
+UINT32\r
+SmmInitPageTable (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Schedule a procedure to run on the specified CPU.\r
+\r
+ @param Procedure The address of the procedure to run\r
+ @param CpuIndex Target CPU number\r
+ @param ProcArguments The parameter to pass to the procedure\r
+\r
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
+ @retval EFI_SUCCESS - The procedure has been successfully scheduled\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmStartupThisAp (\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
+ );\r
+\r
+/**\r
+ Schedule a procedure to run on the specified CPU in a blocking fashion.\r
+\r
+ @param Procedure The address of the procedure to run\r
+ @param CpuIndex Target CPU Index\r
+ @param ProcArguments The parameter to pass to the procedure\r
+\r
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
+ @retval EFI_SUCCESS The procedure has been successfully scheduled\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+SmmBlockingStartupThisAp (\r
+ IN EFI_AP_PROCEDURE Procedure,\r
+ IN UINTN CpuIndex,\r
+ IN OUT VOID *ProcArguments OPTIONAL\r
+ );\r
+\r
+/**\r
+ Initialize MP synchronization data.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+InitializeMpSyncData (\r
+ VOID\r
+ );\r
+\r
+/**\r
+\r
+ Find out SMRAM information including SMRR base and SMRR size.\r
+\r
+ @param SmrrBase SMRR base\r
+ @param SmrrSize SMRR size\r
+\r
+**/\r
+VOID\r
+FindSmramInfo (\r
+ OUT UINT32 *SmrrBase,\r
+ OUT UINT32 *SmrrSize\r
+ );\r
+\r
+/**\r
+ The function is invoked before SMBASE relocation in S3 path to restore CPU status.\r
+\r
+ The function is invoked before SMBASE relocation in S3 path. It does first time microcode load\r
+ and restores MTRRs for both BSP and APs.\r
+\r
+**/\r
+VOID\r
+EarlyInitializeCpu (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ The function is invoked after SMBASE relocation in S3 path to restore CPU status.\r
+\r
+ The function is invoked after SMBASE relocation in S3 path. It restores configuration according to\r
+ data saved by normal boot path for both BSP and APs.\r
+\r
+**/\r
+VOID\r
+InitializeCpu (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Page Fault handler for SMM use.\r
+\r
+ @param InterruptType Defines the type of interrupt or exception that\r
+ occurred on the processor.This parameter is processor architecture specific.\r
+ @param SystemContext A pointer to the processor context when\r
+ the interrupt occurred on the processor.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmiPFHandler (\r
+ IN EFI_EXCEPTION_TYPE InterruptType,\r
+ IN EFI_SYSTEM_CONTEXT SystemContext\r
+ );\r
+\r
+/**\r
+ Perform the remaining tasks.\r
+\r
+**/\r
+VOID\r
+PerformRemainingTasks (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Initialize MSR spin lock by MSR index.\r
+\r
+ @param MsrIndex MSR index value.\r
+\r
+**/\r
+VOID\r
+InitMsrSpinLockByIndex (\r
+ IN UINT32 MsrIndex\r
+ );\r
+\r
+/**\r
+ Hook return address of SMM Save State so that semaphore code\r
+ can be executed immediately after AP exits SMM to indicate to\r
+ the BSP that an AP has exited SMM after SMBASE relocation.\r
+\r
+ @param[in] CpuIndex The processor index.\r
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE\r
+ immediately after AP exits SMM.\r
+\r
+**/\r
+VOID\r
+SemaphoreHook (\r
+ IN UINTN CpuIndex,\r
+ IN volatile BOOLEAN *RebasedFlag\r
+ );\r
+\r
+/**\r
+Configure SMM Code Access Check feature for all processors.\r
+SMM Feature Control MSR will be locked after configuration.\r
+**/\r
+VOID\r
+ConfigSmmCodeAccessCheck (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Hook the code executed immediately after an RSM instruction on the currently\r
+ executing CPU. The mode of code executed immediately after RSM must be\r
+ detected, and the appropriate hook must be selected. Always clear the auto\r
+ HALT restart flag if it is set.\r
+\r
+ @param[in] CpuIndex The processor index for the currently\r
+ executing CPU.\r
+ @param[in] CpuState Pointer to SMRAM Save State Map for the\r
+ currently executing CPU.\r
+ @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to\r
+ 32-bit mode from 64-bit SMM.\r
+ @param[in] NewInstructionPointer Instruction pointer to use if resuming to\r
+ same mode as SMM.\r
+\r
+ @retval The value of the original instruction pointer before it was hooked.\r
+\r
+**/\r
+UINT64\r
+EFIAPI\r
+HookReturnFromSmm (\r
+ IN UINTN CpuIndex,\r
+ SMRAM_SAVE_STATE_MAP *CpuState,\r
+ UINT64 NewInstructionPointer32,\r
+ UINT64 NewInstructionPointer\r
+ );\r
+\r
+/**\r
+ Get the size of the SMI Handler in bytes.\r
+\r
+ @retval The size, in bytes, of the SMI Handler.\r
+\r
+**/\r
+UINTN\r
+EFIAPI\r
+GetSmiHandlerSize (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Install the SMI handler for the CPU specified by CpuIndex. This function\r
+ is called by the CPU that was elected as monarch during System Management\r
+ Mode initialization.\r
+\r
+ @param[in] CpuIndex The index of the CPU to install the custom SMI handler.\r
+ The value must be between 0 and the NumberOfCpus field\r
+ in the System Management System Table (SMST).\r
+ @param[in] SmBase The SMBASE address for the CPU specified by CpuIndex.\r
+ @param[in] SmiStack The stack to use when an SMI is processed by the\r
+ the CPU specified by CpuIndex.\r
+ @param[in] StackSize The size, in bytes, of the stack used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] GdtBase The base address of the GDT to use when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] GdtSize The size, in bytes, of the GDT used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] IdtBase The base address of the IDT to use when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] IdtSize The size, in bytes, of the IDT used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] Cr3 The base address of the page tables to use when an SMI\r
+ is processed by the CPU specified by CpuIndex.\r
+**/\r
+VOID\r
+EFIAPI\r
+InstallSmiHandler (\r
+ IN UINTN CpuIndex,\r
+ IN UINT32 SmBase,\r
+ IN VOID *SmiStack,\r
+ IN UINTN StackSize,\r
+ IN UINTN GdtBase,\r
+ IN UINTN GdtSize,\r
+ IN UINTN IdtBase,\r
+ IN UINTN IdtSize,\r
+ IN UINT32 Cr3\r
+ );\r
+\r
+/**\r
+ Search module name by input IP address and output it.\r
+\r
+ @param CallerIpAddress Caller instruction pointer.\r
+\r
+**/\r
+VOID\r
+DumpModuleInfoByIp (\r
+ IN UINTN CallerIpAddress\r
+ );\r
+#endif\r
--- /dev/null
+## @file\r
+# CPU SMM driver.\r
+#\r
+# This SMM driver performs SMM initialization, deploy SMM Entry Vector,\r
+# provides CPU specific services in SMM.\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+##\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = PiSmmCpuDxeSmm\r
+ MODULE_UNI_FILE = PiSmmCpuDxeSmm.uni\r
+ FILE_GUID = A3FF0EF5-0C28-42f5-B544-8C7DE1E80014\r
+ MODULE_TYPE = DXE_SMM_DRIVER\r
+ VERSION_STRING = 1.0\r
+ PI_SPECIFICATION_VERSION = 0x0001000A\r
+ ENTRY_POINT = PiCpuSmmEntry\r
+\r
+#\r
+# The following information is for reference only and not required by the build tools.\r
+#\r
+# VALID_ARCHITECTURES = IA32 X64\r
+#\r
+\r
+[Sources]\r
+ PiSmmCpuDxeSmm.c\r
+ PiSmmCpuDxeSmm.h\r
+ MpService.c\r
+ SyncTimer.c\r
+ CpuS3.c\r
+ CpuService.c\r
+ CpuService.h\r
+ SmmProfile.c\r
+ SmmProfile.h\r
+ SmmProfileInternal.h\r
+ SmramSaveState.c\r
+\r
+[Sources.Ia32]\r
+ Ia32/Semaphore.c\r
+ Ia32/PageTbl.c\r
+ Ia32/SmmProfileArch.c\r
+ Ia32/SmmProfileArch.h\r
+ Ia32/SmmInit.asm | MSFT\r
+ Ia32/SmiEntry.asm | MSFT\r
+ Ia32/SmiException.asm | MSFT\r
+ Ia32/MpFuncs.asm | MSFT\r
+\r
+ Ia32/SmmInit.asm | INTEL\r
+ Ia32/SmiEntry.asm | INTEL\r
+ Ia32/SmiException.asm | INTEL\r
+ Ia32/MpFuncs.asm | INTEL\r
+\r
+ Ia32/SmmInit.S | GCC\r
+ Ia32/SmiEntry.S | GCC\r
+ Ia32/SmiException.S | GCC\r
+ Ia32/MpFuncs.S | GCC\r
+\r
+[Sources.X64]\r
+ X64/Semaphore.c\r
+ X64/PageTbl.c\r
+ X64/SmmProfileArch.c\r
+ X64/SmmProfileArch.h\r
+ X64/SmmInit.asm | MSFT\r
+ X64/SmiEntry.asm | MSFT\r
+ X64/SmiException.asm | MSFT\r
+ X64/MpFuncs.asm | MSFT\r
+\r
+ X64/SmmInit.asm | INTEL\r
+ X64/SmiEntry.asm | INTEL\r
+ X64/SmiException.asm | INTEL\r
+ X64/MpFuncs.asm | INTEL\r
+\r
+ X64/SmmInit.S | GCC\r
+ X64/SmiEntry.S | GCC\r
+ X64/SmiException.S | GCC\r
+ X64/MpFuncs.S | GCC\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+ MdeModulePkg/MdeModulePkg.dec\r
+ UefiCpuPkg/UefiCpuPkg.dec\r
+ IntelFrameworkPkg/IntelFrameworkPkg.dec\r
+\r
+[LibraryClasses]\r
+ UefiDriverEntryPoint\r
+ UefiRuntimeServicesTableLib\r
+ CacheMaintenanceLib\r
+ PcdLib\r
+ DebugLib\r
+ BaseLib\r
+ SynchronizationLib\r
+ BaseMemoryLib\r
+ MtrrLib\r
+ SmmLib\r
+ IoLib\r
+ TimerLib\r
+ SmmServicesTableLib\r
+ MemoryAllocationLib\r
+ DebugAgentLib\r
+ HobLib\r
+ PciLib\r
+ LocalApicLib\r
+ UefiCpuLib\r
+ SmmCpuPlatformHookLib\r
+ CpuExceptionHandlerLib\r
+ UefiLib\r
+ DxeServicesTableLib\r
+ CpuLib\r
+ ReportStatusCodeLib\r
+ SmmCpuFeaturesLib\r
+ PeCoffGetEntryPointLib\r
+\r
+[Protocols]\r
+ gEfiSmmAccess2ProtocolGuid ## CONSUMES\r
+ gEfiMpServiceProtocolGuid ## CONSUMES\r
+ gEfiSmmConfigurationProtocolGuid ## PRODUCES\r
+ gEfiSmmCpuProtocolGuid ## PRODUCES\r
+ gEfiSmmReadyToLockProtocolGuid ## NOTIFY\r
+ gEfiSmmCpuServiceProtocolGuid ## PRODUCES\r
+ gEfiSmmCpuSaveStateProtocolGuid ## SOMETIMES_PRODUCES\r
+\r
+[Guids]\r
+ gEfiAcpiVariableGuid ## SOMETIMES_CONSUMES ## HOB # it is used for S3 boot.\r
+ gEfiGlobalVariableGuid ## SOMETIMES_PRODUCES ## Variable:L"SmmProfileData"\r
+ gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable\r
+ gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable\r
+\r
+[FeaturePcd]\r
+ gEfiMdeModulePkgTokenSpaceGuid.PcdFrameworkCompatibilitySupport ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmEnableBspElection ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugSupport ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileEnable ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileRingBuffer ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock ## CONSUMES\r
+\r
+[Pcd]\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES\r
+\r
+[Depex]\r
+ gEfiMpServiceProtocolGuid\r
+\r
+[UserExtensions.TianoCore."ExtraFiles"]\r
+ PiSmmCpuDxeSmmExtra.uni\r
--- /dev/null
+/** @file\r
+Enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+#include "SmmProfileInternal.h"\r
+\r
+//
+// CR3 value (page-table root) used while SMM profile is active.
+//
+UINT32 mSmmProfileCr3;
+
+SMM_PROFILE_HEADER *mSmmProfileBase;
+MSR_DS_AREA_STRUCT *mMsrDsAreaBase;
+//
+// The size of the buffer (mSmmProfileBase) that stores SMM profile data.
+//
+UINTN mSmmProfileSize;
+
+//
+// The size of the Debug Store area used to enable branch trace store.
+//
+UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;
+
+//
+// The flag indicates if execute-disable is supported by processor.
+//
+BOOLEAN mXdSupported = FALSE;
+
+//
+// The flag indicates if execute-disable is enabled on processor.
+//
+BOOLEAN mXdEnabled = FALSE;
+
+//
+// The flag indicates if BTS is supported by processor.
+//
+BOOLEAN mBtsSupported = FALSE;
+
+//
+// The flag indicates if SMM profile has started to record data.
+//
+BOOLEAN mSmmProfileStart = FALSE;
+
+//
+// Per-CPU count of page-table entries patched while handling one
+// instruction's page fault; consumed and reset by DebugExceptionHandler().
+//
+UINTN *mPFEntryCount;
+
+//
+// Per-CPU saved original values and addresses of the patched page-table
+// entries, restored by DebugExceptionHandler().
+//
+UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
+UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
+
+//
+// Per-CPU Debug Store area plus BTS/PEBS record pointers.
+//
+MSR_DS_AREA_STRUCT **mMsrDsArea;
+BRANCH_TRACE_RECORD **mMsrBTSRecord;
+UINTN mBTSRecordNumber;
+PEBS_RECORD **mMsrPEBSRecord;
+
+//
+// These memory ranges are always present; they do not generate the access type of page fault exception,
+// but they possibly generate instruction fetch type of page fault exception.
+//
+MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
+UINTN mProtectionMemRangeCount = 0;
+
+//
+// Some predefined memory ranges. Initializer order: {{Base, Top}, Present, Nx}.
+//
+MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
+ //
+ // SMRAM range (to be fixed in runtime).
+ // It is always present and instruction fetches are allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,FALSE},
+
+ //
+ // SMM profile data range( to be fixed in runtime).
+ // It is always present and instruction fetches are not allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,TRUE},
+
+ //
+ // Future extended range could be added here.
+ //
+
+ //
+ // PCI MMIO ranges (to be added in runtime).
+ // They are always present and instruction fetches are not allowed.
+ //
+};
+
+//
+// These memory ranges are mapped by 4KB-page instead of 2MB-page.
+//
+MEMORY_RANGE *mSplitMemRange = NULL;
+UINTN mSplitMemRangeCount = 0;
+
+//
+// SMI command port.
+//
+UINT32 mSmiCommandPort;
+\r
+/**
+  Turn off Branch Trace Store by clearing the TR and BTS bits in the
+  MSR_DEBUG_CTL (IA32_DEBUGCTL) register.
+
+**/
+VOID
+DisableBTS (
+  VOID
+  )
+{
+  UINT64  BtsBits;
+
+  BtsBits = (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
+  AsmMsrAnd64 (MSR_DEBUG_CTL, ~BtsBits);
+}
+\r
+/**
+  Turn on Branch Trace Store by setting the TR and BTS bits in the
+  MSR_DEBUG_CTL (IA32_DEBUGCTL) register.
+
+**/
+VOID
+EnableBTS (
+  VOID
+  )
+{
+  UINT64  BtsBits;
+
+  BtsBits = MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR;
+  AsmMsrOr64 (MSR_DEBUG_CTL, BtsBits);
+}
+\r
+/**
+  Look up the zero-based CPU index whose recorded APIC ID matches the APIC ID
+  of the processor executing this function.
+
+  @return The matching CPU index; asserts (and returns 0) if no entry matches.
+
+**/
+UINTN
+GetCpuIndex (
+  VOID
+  )
+{
+  UINT32  CurrentApicId;
+  UINTN   CpuIndex;
+  UINTN   MaxLogicalCpus;
+
+  CurrentApicId  = GetApicId ();
+  MaxLogicalCpus = (UINTN)PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
+
+  CpuIndex = 0;
+  while (CpuIndex < MaxLogicalCpus) {
+    if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == CurrentApicId) {
+      return CpuIndex;
+    }
+    CpuIndex++;
+  }
+
+  //
+  // The executing CPU is not in the recorded processor table.
+  //
+  ASSERT (FALSE);
+  return 0;
+}
+\r
+/**
+  Get the source of IP after execute-disable exception is triggered.
+
+  Walks the per-CPU Branch Trace Store ring buffer backwards from the current
+  BTSIndex looking for records whose branch target equals DestinationIP. The
+  first match is treated as the branch into the DEBUG exception itself; the
+  second match is the branch that actually reached DestinationIP, and its
+  source address is returned.
+
+  @param CpuIndex The index of CPU.
+  @param DestinationIP The destination address.
+
+  @return The last-branch-from address of the second matching record, or 0 if
+          no second match is found within mBTSRecordNumber records.
+
+**/
+UINT64
+GetSourceFromDestinationOnBts (
+ UINTN CpuIndex,
+ UINT64 DestinationIP
+ )
+{
+ BRANCH_TRACE_RECORD *CurrentBTSRecord;
+ UINTN Index;
+ BOOLEAN FirstMatch;
+
+ FirstMatch = FALSE;
+
+ //
+ // BTSIndex points at the next record to be written; scan backwards from it
+ // over at most mBTSRecordNumber records.
+ //
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
+ for (Index = 0; Index < mBTSRecordNumber; Index++) {
+ if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
+ //
+ // Ran off the front of the ring buffer: wrap to its last record.
+ // NOTE(review): the "- 1" here is a byte-address adjustment applied
+ // before the record-sized decrement below -- confirm this lands on the
+ // intended final record rather than a misaligned address.
+ //
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
+ CurrentBTSRecord --;
+ }
+ if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
+ //
+ // A record branched to DestinationIP: the first such record is the
+ // DEBUG exception entry, so keep scanning for the second.
+ //
+ if (!FirstMatch) {
+ //
+ // The first one is DEBUG exception
+ //
+ FirstMatch = TRUE;
+ } else {
+ //
+ // Second match: this is the branch that originally reached the target.
+ //
+ return CurrentBTSRecord->LastBranchFrom;
+ }
+ }
+ CurrentBTSRecord--;
+ }
+
+ return 0;
+}
+\r
+/**
+  SMM profile specific INT 1 (single-step) exception handler.
+
+  Restores the page-table entries that were temporarily patched while handling
+  the preceding page fault, flushes the TLB so the restored mappings take
+  effect, and clears the trap flag to resume normal execution.
+
+  @param InterruptType Defines the type of interrupt or exception that
+                       occurred on the processor. This parameter is processor
+                       architecture specific.
+  @param SystemContext A pointer to the processor context when the interrupt
+                       occurred on the processor.
+**/
+VOID
+EFIAPI
+DebugExceptionHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+  UINTN  CpuIndex;
+  UINTN  Entry;
+  UINTN  EntryCount;
+
+  if (!mSmmProfileStart) {
+    return;
+  }
+
+  CpuIndex = GetCpuIndex ();
+
+  //
+  // Undo every page-table patch recorded for this CPU, then reset the
+  // count so the next page fault starts fresh.
+  //
+  EntryCount = mPFEntryCount[CpuIndex];
+  for (Entry = 0; Entry < EntryCount; Entry++) {
+    *mLastPFEntryPointer[CpuIndex][Entry] = mLastPFEntryValue[CpuIndex][Entry];
+  }
+  mPFEntryCount[CpuIndex] = 0;
+
+  //
+  // Make the restored mappings visible to the processor.
+  //
+  CpuFlushTlb ();
+
+  //
+  // Clear TF in EFLAGS to stop single-stepping.
+  //
+  ClearTrapFlag (SystemContext);
+}
+\r
+/**
+  Decide whether an address should be mapped present in the SMM profile page
+  tables and whether it must be execute-disabled.
+
+  @param Address The address of Memory.
+  @param Nx      On return, TRUE if the address must not be executable.
+
+  @return TRUE if the address is to be mapped present, FALSE otherwise.
+
+**/
+BOOLEAN
+IsAddressValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN *Nx
+ )
+{
+  UINTN  RangeIndex;
+
+  *Nx = FALSE;
+
+  if (!FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+    //
+    // Profiling disabled: everything is present; only addresses inside the
+    // SMRR window remain executable.
+    //
+    if ((Address < mCpuHotPlugData.SmrrBase) ||
+        (Address >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+      *Nx = TRUE;
+    }
+    return TRUE;
+  }
+
+  //
+  // Profiling enabled: consult the configured protection ranges. Anything
+  // outside every range is mapped absent and execute-disabled.
+  //
+  for (RangeIndex = 0; RangeIndex < mProtectionMemRangeCount; RangeIndex++) {
+    if ((Address >= mProtectionMemRange[RangeIndex].Range.Base) &&
+        (Address < mProtectionMemRange[RangeIndex].Range.Top)) {
+      *Nx = mProtectionMemRange[RangeIndex].Nx;
+      return mProtectionMemRange[RangeIndex].Present;
+    }
+  }
+
+  *Nx = TRUE;
+  return FALSE;
+}
+\r
+/**
+  Check whether an address lies in a region that must be mapped with 4KB
+  pages instead of a single 2MB page.
+
+  @param Address The address of Memory.
+
+  @return TRUE if the address must be covered by 4KB pages, FALSE otherwise.
+
+**/
+BOOLEAN
+IsAddressSplit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+  UINTN                 RangeIndex;
+  EFI_PHYSICAL_ADDRESS  SmrrTop;
+
+  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+    //
+    // Profiling enabled: split exactly the precomputed split ranges.
+    //
+    for (RangeIndex = 0; RangeIndex < mSplitMemRangeCount; RangeIndex++) {
+      if ((Address >= mSplitMemRange[RangeIndex].Base) &&
+          (Address < mSplitMemRange[RangeIndex].Top)) {
+        return TRUE;
+      }
+    }
+    return FALSE;
+  }
+
+  //
+  // Profiling disabled: split only the 2MB region immediately below the SMRR
+  // base and the last 2MB region of the SMRR window.
+  //
+  SmrrTop = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;
+  if (Address < mCpuHotPlugData.SmrrBase) {
+    return (BOOLEAN)((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB);
+  }
+  if (Address > (SmrrTop - BASE_2MB)) {
+    return (BOOLEAN)((Address - (SmrrTop - BASE_2MB)) < BASE_2MB);
+  }
+  return FALSE;
+}
+\r
+/**
+  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
+
+  Collects MMIO ranges from the GCD memory space map, appends them to the
+  template protection ranges (SMRAM and SMM profile data), and derives the
+  set of sub-ranges that must be mapped with 4KB pages because their bounds
+  are not 2MB aligned.
+
+**/
+VOID
+InitProtectedMemRange (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN NumberOfDescriptors;
+ UINTN NumberOfMmioDescriptors;
+ UINTN NumberOfProtectRange;
+ UINTN NumberOfSpliteRange;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+ UINTN TotalSize;
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS ProtectBaseAddress;
+ EFI_PHYSICAL_ADDRESS ProtectEndAddress;
+ EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress;
+ EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress;
+ UINT64 High4KBPageSize;
+ UINT64 Low4KBPageSize;
+
+ NumberOfDescriptors = 0;
+ NumberOfMmioDescriptors = 0;
+ NumberOfSpliteRange = 0;
+ MemorySpaceMap = NULL;
+
+ //
+ // Get MMIO ranges from GCD and add them into protected memory ranges.
+ // NOTE(review): Status is never checked; on failure NumberOfDescriptors
+ // stays 0 and the template ranges are silently never installed -- confirm
+ // whether an ASSERT_EFI_ERROR (Status) is intended here.
+ //
+ Status = gDS->GetMemorySpaceMap (
+ &NumberOfDescriptors,
+ &MemorySpaceMap
+ );
+ //
+ // First pass: count MMIO descriptors so the exact allocation size is known.
+ //
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
+ NumberOfMmioDescriptors++;
+ }
+ }
+
+ if (NumberOfMmioDescriptors != 0) {
+ TotalSize = NumberOfMmioDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
+ mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mProtectionMemRange != NULL);
+ mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
+
+ //
+ // Copy existing ranges.
+ //
+ CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
+
+ //
+ // Create split ranges which come from protected ranges.
+ // (One MEMORY_RANGE slot per protection range is an upper bound on the
+ // number of split entries generated below.)
+ //
+ TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
+ mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mSplitMemRange != NULL);
+
+ //
+ // Create MMIO ranges which are set to present and execution-disable.
+ // Appended after the template entries copied above.
+ //
+ NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
+ continue;
+ }
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
+ NumberOfProtectRange++;
+ }
+ }
+
+ //
+ // According to protected ranges, create the ranges which will be mapped by 4KB page.
+ //
+ NumberOfSpliteRange = 0;
+ NumberOfProtectRange = mProtectionMemRangeCount;
+ for (Index = 0; Index < NumberOfProtectRange; Index++) {
+ //
+ // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
+ //
+ ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
+ ProtectEndAddress = mProtectionMemRange[Index].Range.Top;
+ if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
+ //
+ // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
+ // A mix of 4KB and 2MB page could save SMRAM space.
+ //
+ Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);
+ Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
+ ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
+ //
+ // There is a 2MB-aligned interior which can stay mapped by 2MB pages;
+ // only the unaligned head and tail need 4KB pages.
+ //
+ High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
+ Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
+ if (High4KBPageSize != 0) {
+ //
+ // Add the not 2MB-aligned tail to be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ if (Low4KBPageSize != 0) {
+ //
+ // Add the not 2MB-aligned head to be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ } else {
+ //
+ // The range could only be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ }
+ }
+
+ mSplitMemRangeCount = NumberOfSpliteRange;
+
+ DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
+ }
+ for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
+ }
+}
+\r
+/**\r
+ Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.\r
+\r
+**/\r
+VOID\r
+InitPaging (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 *Pml4;\r
+ UINT64 *Pde;\r
+ UINT64 *Pte;\r
+ UINT64 *Pt;\r
+ UINTN Address;\r
+ UINTN Level1;\r
+ UINTN Level2;\r
+ UINTN Level3;\r
+ UINTN Level4;\r
+ UINTN NumberOfPdpEntries;\r
+ UINTN NumberOfPml4Entries;\r
+ UINTN SizeOfMemorySpace;\r
+ BOOLEAN Nx;\r
+\r
+ if (sizeof (UINTN) == sizeof (UINT64)) {\r
+ Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;\r
+ //\r
+ // Calculate the table entries of PML4E and PDPTE.\r
+ //\r
+ if (SizeOfMemorySpace <= 39 ) {\r
+ NumberOfPml4Entries = 1;\r
+ NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));\r
+ } else {\r
+ NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));\r
+ NumberOfPdpEntries = 512;\r
+ }\r
+ } else {\r
+ NumberOfPml4Entries = 1;\r
+ NumberOfPdpEntries = 4;\r
+ }\r
+\r
+ //\r
+ // Go through page table and change 2MB-page into 4KB-page.\r
+ //\r
+ for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {\r
+ if (sizeof (UINTN) == sizeof (UINT64)) {\r
+ if ((Pml4[Level1] & IA32_PG_P) == 0) {\r
+ //\r
+ // If Pml4 entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
+ } else {\r
+ Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ }\r
+ for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {\r
+ if ((*Pde & IA32_PG_P) == 0) {\r
+ //\r
+ // If PDE entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
+ if (Pte == 0) {\r
+ continue;\r
+ }\r
+ for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {\r
+ if ((*Pte & IA32_PG_P) == 0) {\r
+ //\r
+ // If PTE entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Address = (((Level2 << 9) + Level3) << 21);\r
+\r
+ //\r
+ // If it is 2M page, check IsAddressSplit()\r
+ //\r
+ if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {\r
+ //\r
+ // Based on current page table, create 4KB page table for split area.\r
+ //\r
+ ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));\r
+\r
+ Pt = AllocatePages (1);\r
+ ASSERT (Pt != NULL);\r
+\r
+ // Split it\r
+ for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {\r
+ Pt[Level4] = Address + ((Level4 << 12) | IA32_PG_RW | IA32_PG_P);\r
+ } // end for PT\r
+ *Pte = (UINTN)Pt | IA32_PG_RW | IA32_PG_P;\r
+ } // end if IsAddressSplit\r
+ } // end for PTE\r
+ } // end for PDE\r
+ }\r
+\r
+ //\r
+ // Go through page table and set several page table entries to absent or execute-disable.\r
+ //\r
+ DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));\r
+ for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {\r
+ if (sizeof (UINTN) == sizeof (UINT64)) {\r
+ if ((Pml4[Level1] & IA32_PG_P) == 0) {\r
+ //\r
+ // If Pml4 entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
+ } else {\r
+ Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ }\r
+ for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {\r
+ if ((*Pde & IA32_PG_P) == 0) {\r
+ //\r
+ // If PDE entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
+ if (Pte == 0) {\r
+ continue;\r
+ }\r
+ for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {\r
+ if ((*Pte & IA32_PG_P) == 0) {\r
+ //\r
+ // If PTE entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Address = (((Level2 << 9) + Level3) << 21);\r
+\r
+ if ((*Pte & IA32_PG_PS) != 0) {\r
+ // 2MB page\r
+\r
+ if (!IsAddressValid (Address, &Nx)) {\r
+ //\r
+ // Patch to remove Present flag and RW flag\r
+ //\r
+ *Pte = *Pte & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));\r
+ }\r
+ if (Nx && mXdSupported) {\r
+ *Pte = *Pte | IA32_PG_NX;\r
+ }\r
+ } else {\r
+ // 4KB page\r
+ Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK);\r
+ if (Pt == 0) {\r
+ continue;\r
+ }\r
+ for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {\r
+ if (!IsAddressValid (Address, &Nx)) {\r
+ *Pt = *Pt & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));\r
+ }\r
+ if (Nx && mXdSupported) {\r
+ *Pt = *Pt | IA32_PG_NX;\r
+ }\r
+ Address += SIZE_4KB;\r
+ } // end for PT\r
+ } // end if PS\r
+ } // end for PTE\r
+ } // end for PDE\r
+ }\r
+\r
+ //\r
+ // Flush TLB\r
+ //\r
+ CpuFlushTlb ();\r
+ DEBUG ((EFI_D_INFO, "Patch page table done!\n"));\r
+ //\r
+ // Set execute-disable flag\r
+ //\r
+ mXdEnabled = TRUE;\r
+\r
+ return ;\r
+}\r
+\r
+/**\r
+ To find FADT in ACPI tables.\r
+\r
+ @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable.\r
+\r
+ @return FADT table pointer.\r
+**/\r
+EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *\r
+FindAcpiFadtTableByAcpiGuid (\r
+ IN EFI_GUID *AcpiTableGuid\r
+ )\r
+{\r
+ EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER *Rsdp;\r
+ EFI_ACPI_DESCRIPTION_HEADER *Rsdt;\r
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
+ UINTN Index;\r
+ UINT32 Data32;\r
+ Rsdp = NULL;\r
+ Rsdt = NULL;\r
+ Fadt = NULL;\r
+ //\r
+ // found ACPI table RSD_PTR from system table\r
+ //\r
+ for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {\r
+ if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), AcpiTableGuid)) {\r
+ //\r
+ // A match was found.\r
+ //\r
+ Rsdp = gST->ConfigurationTable[Index].VendorTable;\r
+ break;\r
+ }\r
+ }\r
+\r
+ if (Rsdp == NULL) {\r
+ return NULL;\r
+ }\r
+\r
+ Rsdt = (EFI_ACPI_DESCRIPTION_HEADER *)(UINTN) Rsdp->RsdtAddress;\r
+ if (Rsdt == NULL || Rsdt->Signature != EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE) {\r
+ return NULL;\r
+ }\r
+\r
+ for (Index = sizeof (EFI_ACPI_DESCRIPTION_HEADER); Index < Rsdt->Length; Index = Index + sizeof (UINT32)) {\r
+\r
+ Data32 = *(UINT32 *) ((UINT8 *) Rsdt + Index);\r
+ Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) (UINT32 *) (UINTN) Data32;\r
+ if (Fadt->Header.Signature == EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {\r
+ break;\r
+ }\r
+ }\r
+\r
+ if (Fadt == NULL || Fadt->Header.Signature != EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {\r
+ return NULL;\r
+ }\r
+\r
+ return Fadt;\r
+}\r
+\r
+/**\r
+ To find FADT in ACPI tables.\r
+\r
+ @return FADT table pointer.\r
+**/\r
+EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *\r
+FindAcpiFadtTable (\r
+ VOID\r
+ )\r
+{\r
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
+\r
+ Fadt = FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid);\r
+ if (Fadt != NULL) {\r
+ return Fadt;\r
+ }\r
+\r
+ return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid);\r
+}\r
+\r
+/**\r
+ To get system port address of the SMI Command Port in FADT table.\r
+\r
+**/\r
+VOID\r
+GetSmiCommandPort (\r
+ VOID\r
+ )\r
+{\r
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
+\r
+ Fadt = FindAcpiFadtTable ();\r
+ ASSERT (Fadt != NULL);\r
+\r
+ mSmiCommandPort = Fadt->SmiCmd;\r
+ DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));\r
+}\r
+\r
/**
  Start SMM profile data collection.

  Raises the global flag that gates profiling; SmmProfilePFHandler and
  SmmProfileRecordSmiNum only record data while this flag is TRUE.

  NOTE(review): the previous description ("Updates page table ...") was a
  copy of InitPaging's documentation and did not describe this function.

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}
+\r
+/**\r
+ Initialize SMM profile in SmmReadyToLock protocol callback function.\r
+\r
+ @param Protocol Points to the protocol's unique identifier.\r
+ @param Interface Points to the interface instance.\r
+ @param Handle The handle on which the interface was installed.\r
+\r
+ @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+InitSmmProfileCallBack (\r
+ IN CONST EFI_GUID *Protocol,\r
+ IN VOID *Interface,\r
+ IN EFI_HANDLE Handle\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+\r
+ //\r
+ // Save to variable so that SMM profile data can be found.\r
+ //\r
+ Status = gRT->SetVariable (\r
+ SMM_PROFILE_NAME,\r
+ &gEfiCallerIdGuid,\r
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,\r
+ sizeof(mSmmProfileBase),\r
+ &mSmmProfileBase\r
+ );\r
+\r
+ //\r
+ // Get Software SMI from FADT\r
+ //\r
+ GetSmiCommandPort ();\r
+\r
+ //\r
+ // Initialize protected memory range for patching page table later.\r
+ //\r
+ InitProtectedMemRange ();\r
+\r
+ return EFI_SUCCESS;\r
+}\r
+\r
/**
  Initialize SMM profile data structures.

  Allocates per-CPU page fault bookkeeping arrays, the SMM profile buffer
  (in reserved memory below 4GB), and - when BTS is supported - the per-CPU
  Debug Store areas carved out of the same allocation. Fills in the profile
  header, updates the TSEG / profile-buffer protection ranges, pre-allocates
  pages for the page fault handler, and registers a SmmReadyToLock
  notification that starts the profile.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS Status;
  EFI_PHYSICAL_ADDRESS Base;
  VOID *Registration;
  UINTN Index;
  UINTN MsrDsAreaSizePerCpu;
  UINTN TotalSize;

  //
  // Per-CPU arrays used by the PF handler to remember which page table
  // entries it temporarily mapped (restored later in single-step handling).
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                        sizeof (mLastPFEntryValue[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                          sizeof (mLastPFEntryPointer[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  // Profile size must be 4KB aligned.
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    // Append the Debug Store area after the profile buffer.
    // NOTE(review): assumes mMsrDsAreaSize was set before this runs - confirm.
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  //
  // AllocateMaxAddress with 0xFFFFFFFF keeps the buffer below 4GB.
  //
  Base = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize = 0;
  mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis = 0;
  mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Carve the DS area into equal per-CPU slices: a MSR_DS_AREA_STRUCT
    // header, then BTS records, with PEBS records at the end of each slice.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
    mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
    for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {
      mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  mProtectionMemRange = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return ;
}
+\r
+/**\r
+ Check if XD feature is supported by a processor.\r
+\r
+**/\r
+VOID\r
+CheckFeatureSupported (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+\r
+ if (mXdSupported) {\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax <= CPUID_EXTENDED_FUNCTION) {\r
+ //\r
+ // Extended CPUID functions are not supported on this processor.\r
+ //\r
+ mXdSupported = FALSE;\r
+ }\r
+\r
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {\r
+ //\r
+ // Execute Disable Bit feature is not supported on this processor.\r
+ //\r
+ mXdSupported = FALSE;\r
+ }\r
+ }\r
+\r
+ if (mBtsSupported) {\r
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {\r
+ //\r
+ // Per IA32 manuals:\r
+ // When CPUID.1:EDX[21] is set, the following BTS facilities are available:\r
+ // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the\r
+ // availability of the BTS facilities, including the ability to set the BTS and\r
+ // BTINT bits in the MSR_DEBUGCTLA MSR.\r
+ // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.\r
+ //\r
+ if ((AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 11, 11) == 0) &&\r
+ (AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 12, 12) == 0)) {\r
+ //\r
+ // BTS facilities is supported.\r
+ //\r
+ mBtsSupported = FALSE;\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ Check if XD and BTS features are supported by all processors.\r
+\r
+**/\r
+VOID\r
+CheckProcessorFeature (\r
+ VOID\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ EFI_MP_SERVICES_PROTOCOL *MpServices;\r
+\r
+ Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // First detect if XD and BTS are supported\r
+ //\r
+ mXdSupported = TRUE;\r
+ mBtsSupported = TRUE;\r
+\r
+ //\r
+ // Check if XD and BTS are supported on all processors.\r
+ //\r
+ CheckFeatureSupported ();\r
+\r
+ //\r
+ //Check on other processors if BSP supports this\r
+ //\r
+ if (mXdSupported || mBtsSupported) {\r
+ MpServices->StartupAllAPs (\r
+ MpServices,\r
+ (EFI_AP_PROCEDURE) CheckFeatureSupported,\r
+ TRUE,\r
+ NULL,\r
+ 0,\r
+ NULL,\r
+ NULL\r
+ );\r
+ }\r
+}\r
+\r
+/**\r
+ Enable XD feature.\r
+\r
+**/\r
+VOID\r
+ActivateXd (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 MsrRegisters;\r
+\r
+ MsrRegisters = AsmReadMsr64 (MSR_EFER);\r
+ if ((MsrRegisters & MSR_EFER_XD) != 0) {\r
+ return ;\r
+ }\r
+ MsrRegisters |= MSR_EFER_XD;\r
+ AsmWriteMsr64 (MSR_EFER, MsrRegisters);\r
+}\r
+\r
+/**\r
+ Enable single step.\r
+\r
+**/\r
+VOID\r
+ActivateSingleStepDB (\r
+ VOID\r
+ )\r
+{\r
+ UINTN Dr6;\r
+\r
+ Dr6 = AsmReadDr6 ();\r
+ if ((Dr6 & DR6_SINGLE_STEP) != 0) {\r
+ return;\r
+ }\r
+ Dr6 |= DR6_SINGLE_STEP;\r
+ AsmWriteDr6 (Dr6);\r
+}\r
+\r
+/**\r
+ Enable last branch.\r
+\r
+**/\r
+VOID\r
+ActivateLBR (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 DebugCtl;\r
+\r
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);\r
+ if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {\r
+ return ;\r
+ }\r
+ AsmWriteMsr64 (MSR_LER_FROM_LIP, 0);\r
+ AsmWriteMsr64 (MSR_LER_TO_LIP, 0);\r
+ DebugCtl |= MSR_DEBUG_CTL_LBR;\r
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);\r
+}\r
+\r
+/**\r
+ Enable branch trace store.\r
+\r
+ @param CpuIndex The index of the processor.\r
+\r
+**/\r
+VOID\r
+ActivateBTS (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ UINT64 DebugCtl;\r
+\r
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);\r
+ if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {\r
+ return ;\r
+ }\r
+\r
+ AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);\r
+ DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);\r
+ DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);\r
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);\r
+}\r
+\r
+/**\r
+ Increase SMI number in each SMI entry.\r
+\r
+**/\r
+VOID\r
+SmmProfileRecordSmiNum (\r
+ VOID\r
+ )\r
+{\r
+ if (mSmmProfileStart) {\r
+ mSmmProfileBase->NumSmis++;\r
+ }\r
+}\r
+\r
+/**\r
+ Initialize processor environment for SMM profile.\r
+\r
+ @param CpuIndex The index of the processor.\r
+\r
+**/\r
+VOID\r
+ActivateSmmProfile (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ //\r
+ // Enable Single Step DB#\r
+ //\r
+ ActivateSingleStepDB ();\r
+\r
+ if (mBtsSupported) {\r
+ //\r
+ // We can not get useful information from LER, so we have to use BTS.\r
+ //\r
+ ActivateLBR ();\r
+\r
+ //\r
+ // Enable BTS\r
+ //\r
+ ActivateBTS (CpuIndex);\r
+ }\r
+}\r
+\r
+/**\r
+ Initialize SMM profile in SMM CPU entry point.\r
+\r
+ @param[in] Cr3 The base address of the page tables to use in SMM.\r
+\r
+**/\r
+VOID\r
+InitSmmProfile (\r
+ UINT32 Cr3\r
+ )\r
+{\r
+ //\r
+ // Save Cr3\r
+ //\r
+ mSmmProfileCr3 = Cr3;\r
+\r
+ //\r
+ // Skip SMM profile initialization if feature is disabled\r
+ //\r
+ if (!FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Initialize SmmProfile here\r
+ //\r
+ InitSmmProfileInternal ();\r
+\r
+ //\r
+ // Initialize profile IDT.\r
+ //\r
+ InitIdtr ();\r
+}\r
+\r
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. And it also save the original page
  table to be restored in single-step exception.

  The saved entries (mLastPFEntryValue/mLastPFEntryPointer) are indexed per
  CPU; the caller guarantees PFAddress is below 4GB.

  @param PageTable PageTable Address.
  @param PFAddress The memory address which caused page fault exception.
  @param CpuIndex The index of the processor.
  @param ErrorCode The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64 *PageTable,
  UINT64 PFAddress,
  UINTN CpuIndex,
  UINTN ErrorCode
  )
{
  UINTN PTIndex;
  UINTN PFIndex;

  //
  // PML4 (only walked on 64-bit builds; 32-bit PAE has no PML4 level)
  //
  if (sizeof(UINT64) == sizeof(UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 2MB page containing PFAddress as present+writable.
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      // Instruction fetch fault: clear NX so the fetch can proceed.
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 4KB page containing PFAddress as present+writable.
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      // Instruction fetch fault: clear NX so the fetch can proceed.
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
+\r
/**
  The Page fault handler to save SMM profile data.

  Temporarily maps the faulting address (RestorePageTable*), then - unless
  the address was recognized as a valid protected access - logs one
  SMM_PROFILE_ENTRY describing the fault, de-duplicating identical records
  and optionally wrapping as a ring buffer.

  @param Rip The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64 *PageTable;
  UINT64 PFAddress;
  UINTN CpuIndex;
  UINTN Index;
  UINT64 InstructionAddress;
  UINTN MaxEntryNumber;
  UINTN CurrentEntryNumber;
  BOOLEAN IsValidPFAddress;
  SMM_PROFILE_ENTRY *SmmProfileEntry;
  UINT64 SmiCommand;
  EFI_STATUS Status;
  UINTN SwSmiCpuIndex;
  UINT8 SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Suspend branch tracing while we work, so the handler itself is not traced.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  // NOTE(review): assumes CR3 low flag bits are zero here - confirm.
  PageTable = (UINT64 *)AsmReadCr3 ();
  PFAddress = AsmReadCr2 ();
  CpuIndex = GetCpuIndex ();

  if (PFAddress <= 0xFFFFFFFF) {
    RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode);
  } else {
    RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
  }

  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Try to find which CPU trigger SWSMI
    //
    SwSmiCpuIndex = 0;
    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan each CPU's save state for an I/O access to the SMI command port.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // Great! Find it.
        //
        SwSmiCpuIndex = Index;
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    //
    // Entries start immediately after the SMM_PROFILE_HEADER.
    //
    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        // Ring-buffer mode: wrap and overwrite the oldest entries.
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
+\r
+/**\r
+ Replace INT1 exception handler to restore page table to absent/execute-disable state\r
+ in order to trigger page fault again to save SMM profile data..\r
+\r
+**/\r
+VOID\r
+InitIdtr (\r
+ VOID\r
+ )\r
+{\r
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);\r
+}\r
--- /dev/null
+/** @file\r
+SMM profile header file.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _SMM_PROFILE_H_\r
+#define _SMM_PROFILE_H_\r
+\r
+#include "SmmProfileInternal.h"\r
+\r
+///\r
+/// MSR Register Index\r
+///\r
+#define MSR_IA32_MISC_ENABLE 0x1A0\r
+\r
+//\r
+// External functions\r
+//\r
+\r
+/**\r
+ Initialize processor environment for SMM profile.\r
+\r
+ @param CpuIndex The index of the processor.\r
+\r
+**/\r
+VOID\r
+ActivateSmmProfile (\r
+ IN UINTN CpuIndex\r
+ );\r
+\r
+/**\r
+ Initialize SMM profile in SMM CPU entry point.\r
+\r
+ @param[in] Cr3 The base address of the page tables to use in SMM.\r
+\r
+**/\r
+VOID\r
+InitSmmProfile (\r
+ UINT32 Cr3\r
+ );\r
+\r
+/**\r
+ Increase SMI number in each SMI entry.\r
+\r
+**/\r
+VOID\r
+SmmProfileRecordSmiNum (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ The Page fault handler to save SMM profile data.\r
+\r
+ @param Rip The RIP when exception happens.\r
+ @param ErrorCode The Error code of exception.\r
+\r
+**/\r
+VOID\r
+SmmProfilePFHandler (\r
+ UINTN Rip,\r
+ UINTN ErrorCode\r
+ );\r
+\r
+/**\r
+ Updates page table to make some memory ranges (like system memory) absent\r
+ and make some memory ranges (like MMIO) present and execute disable. It also\r
+ update 2MB-page to 4KB-page for some memory ranges.\r
+\r
+**/\r
+VOID\r
+SmmProfileStart (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Page fault IDT handler for SMM Profile.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+PageFaultIdtHandlerSmmProfile (\r
+ VOID\r
+ );\r
+\r
+\r
+/**\r
+ Check if XD feature is supported by a processor.\r
+\r
+**/\r
+VOID\r
+CheckFeatureSupported (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Enable XD feature.\r
+\r
+**/\r
+VOID\r
+ActivateXd (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.\r
+\r
+**/\r
+VOID\r
+InitPaging (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Check if XD and BTS features are supported by all processors.\r
+\r
+**/\r
+VOID\r
+CheckProcessorFeature (\r
+ VOID\r
+ );\r
+\r
+extern BOOLEAN mXdSupported;\r
+extern BOOLEAN mXdEnabled;\r
+\r
+#endif // _SMM_PROFILE_H_\r
--- /dev/null
+/** @file\r
+SMM profile internal header file.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _SMM_PROFILE_INTERNAL_H_\r
+#define _SMM_PROFILE_INTERNAL_H_\r
+\r
+#include <Guid/GlobalVariable.h>\r
+#include <Guid/Acpi.h>\r
+#include <Protocol/SmmReadyToLock.h>\r
+#include <Library/UefiRuntimeServicesTableLib.h>\r
+#include <Library/DxeServicesTableLib.h>\r
+#include <Library/CpuLib.h>\r
+#include <IndustryStandard/Acpi.h>\r
+\r
+#include "SmmProfileArch.h"\r
+\r
+//\r
+// Configure the SMM_PROFILE DTS region size\r
+//\r
+#define SMM_PROFILE_DTS_SIZE (4 * 1024 * 1024) // 4M\r
+\r
+#define MAX_PF_PAGE_COUNT 0x2\r
+\r
+#define PEBS_RECORD_NUMBER 0x2\r
+\r
+#define MAX_PF_ENTRY_COUNT 10\r
+\r
//
// A macro enabling unit testing of the profile was historically defined here;
// it must remain disabled (undefined) in production builds.
// NOTE(review): no such macro is currently present - confirm whether this
// comment is stale and can be removed.
//
+\r
+#define IA32_PF_EC_P (1u << 0)\r
+#define IA32_PF_EC_WR (1u << 1)\r
+#define IA32_PF_EC_US (1u << 2)\r
+#define IA32_PF_EC_RSVD (1u << 3)\r
+#define IA32_PF_EC_ID (1u << 4)\r
+\r
+#define SMM_PROFILE_NAME L"SmmProfileData"\r
+\r
+//\r
+// CPU generic definition\r
+//\r
+#define CPUID1_EDX_XD_SUPPORT 0x100000\r
+#define MSR_EFER 0xc0000080\r
+#define MSR_EFER_XD 0x800\r
+\r
+#define CPUID1_EDX_BTS_AVAILABLE 0x200000\r
+\r
+#define DR6_SINGLE_STEP 0x4000\r
+#define RFLAG_TF 0x100\r
+\r
+#define MSR_DEBUG_CTL 0x1D9\r
+#define MSR_DEBUG_CTL_LBR 0x1\r
+#define MSR_DEBUG_CTL_TR 0x40\r
+#define MSR_DEBUG_CTL_BTS 0x80\r
+#define MSR_DEBUG_CTL_BTINT 0x100\r
+#define MSR_LASTBRANCH_TOS 0x1C9\r
+#define MSR_LER_FROM_LIP 0x1DD\r
+#define MSR_LER_TO_LIP 0x1DE\r
+#define MSR_DS_AREA 0x600\r
+\r
//
// A physical address range; Top is the end address (Base + size, see the
// mProtectionMemRange setup in SmmProfile.c).
//
typedef struct {
  EFI_PHYSICAL_ADDRESS Base;
  EFI_PHYSICAL_ADDRESS Top;
} MEMORY_RANGE;
+\r
//
// A memory range plus the protection SMM profile applies to it when
// patching the page tables: Present keeps the range mapped, Nx marks
// it execute-disable (when XD is supported).
//
typedef struct {
  MEMORY_RANGE Range;
  BOOLEAN Present;
  BOOLEAN Nx;
} MEMORY_PROTECTION_RANGE;
+\r
//
// Header at the start of the SMM profile buffer; an array of
// SMM_PROFILE_ENTRY records follows immediately after it.
//
typedef struct {
  UINT64 HeaderSize;      // sizeof (SMM_PROFILE_HEADER)
  UINT64 MaxDataEntries;  // capacity of the entry array
  UINT64 MaxDataSize;     // MaxDataEntries * sizeof (SMM_PROFILE_ENTRY)
  UINT64 CurDataEntries;  // entries logged so far
  UINT64 CurDataSize;     // CurDataEntries * sizeof (SMM_PROFILE_ENTRY)
  UINT64 TsegStart;       // SMRR (TSEG) base
  UINT64 TsegSize;        // SMRR (TSEG) size
  UINT64 NumSmis;         // SMIs serviced while profiling
  UINT64 NumCpus;         // number of processors
} SMM_PROFILE_HEADER;
+\r
//
// One logged SMM access fault (filled in by SmmProfilePFHandler).
//
typedef struct {
  UINT64 SmiNum;       // SMI count when the fault was logged
  UINT64 CpuNum;       // faulting CPU index
  UINT64 ApicId;       // faulting CPU APIC ID
  UINT64 ErrorCode;    // page fault error code
  UINT64 Instruction;  // address of the faulting instruction
  UINT64 Address;      // faulting (CR2) address
  UINT64 SmiCmd;       // SMI command value, or all-ones if not a software SMI
} SMM_PROFILE_ENTRY;
+\r
+extern SMM_S3_RESUME_STATE *mSmmS3ResumeState;\r
+extern UINTN gSmiExceptionHandlers[];\r
+extern BOOLEAN mXdSupported;\r
+extern UINTN *mPFEntryCount;\r
+extern UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];\r
+extern UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];\r
+\r
+//\r
+// Internal functions\r
+//\r
+\r
+/**\r
+ Update IDT table to replace page fault handler and INT 1 handler.\r
+\r
+**/\r
+VOID\r
+InitIdtr (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Check if the memory address will be mapped by 4KB-page.\r
+\r
+ @param Address The address of Memory.\r
+\r
+**/\r
+BOOLEAN\r
+IsAddressSplit (\r
+ IN EFI_PHYSICAL_ADDRESS Address\r
+ );\r
+\r
+/**\r
+ Check if the memory address will be mapped by 4KB-page.\r
+\r
+ @param Address The address of Memory.\r
+ @param Nx The flag indicates if the memory is execute-disable.\r
+\r
+**/\r
+BOOLEAN\r
+IsAddressValid (\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN *Nx\r
+ );\r
+\r
+/**\r
+ Page Fault handler for SMM use.\r
+\r
+**/\r
+VOID\r
+SmiDefaultPFHandler (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Clear TF in FLAGS.\r
+\r
+ @param SystemContext A pointer to the processor context when\r
+ the interrupt occurred on the processor.\r
+\r
+**/\r
+VOID\r
+ClearTrapFlag (\r
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext\r
+ );\r
+\r
#endif // _SMM_PROFILE_INTERNAL_H_
--- /dev/null
+/** @file\r
+Provides services to access SMRAM Save State Map\r
+\r
+Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <PiSmm.h>\r
+\r
+#include <Library/SmmCpuFeaturesLib.h>\r
+\r
+#include <Library/BaseLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/SmmServicesTableLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Register/Cpuid.h>\r
+#include <Register/SmramSaveStateMap.h>\r
+\r
+//\r
+// EFER register LMA bit\r
+//\r
+#define LMA BIT10\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+#define SMM_CPU_OFFSET(Field) OFFSET_OF (SMRAM_SAVE_STATE_MAP, Field)\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_REGISTER_RANGE\r
+///\r
+#define SMM_REGISTER_RANGE(Start, End) { Start, End, End - Start + 1 }\r
+\r
///
/// Structure used to describe a range of registers
///
typedef struct {
  EFI_SMM_SAVE_STATE_REGISTER Start;   // first register in the range (inclusive)
  EFI_SMM_SAVE_STATE_REGISTER End;     // last register in the range (inclusive)
  UINTN Length;                        // End - Start + 1 (see SMM_REGISTER_RANGE)
} CPU_SMM_SAVE_STATE_REGISTER_RANGE;
+\r
+///\r
+/// Structure used to build a lookup table to retrieve the widths and offsets\r
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value\r
+///\r
+\r
+//\r
+// Indices into mSmmCpuWidthOffset[] for the internally defined registers;\r
+// index 0 is the reserved "not found" entry.\r
+//\r
+#define SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX 1\r
+#define SMM_SAVE_STATE_REGISTER_IOMISC_INDEX 2\r
+#define SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX 3\r
+#define SMM_SAVE_STATE_REGISTER_MAX_INDEX 4\r
+\r
+typedef struct {\r
+ UINT8 Width32; ///< Register width, in bytes, in the IA32 save state map (0 = not present)\r
+ UINT8 Width64; ///< Register width, in bytes, in the X64 save state map (0 = not present)\r
+ UINT16 Offset32; ///< Byte offset of the register in the IA32 save state map\r
+ UINT16 Offset64Lo; ///< Byte offset of the lower 32 bits in the X64 save state map\r
+ UINT16 Offset64Hi; ///< Byte offset of the upper 32 bits in the X64 save state map\r
+ BOOLEAN Writeable; ///< TRUE if the register may be written through WriteSaveStateRegister()\r
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;\r
+\r
+///\r
+/// Structure used to build a lookup table for the IOMisc width information\r
+///\r
+typedef struct {\r
+ UINT8 Width; ///< I/O transfer width in bytes (0 = undefined encoding)\r
+ EFI_SMM_SAVE_STATE_IO_WIDTH IoWidth; ///< Matching EFI_SMM_SAVE_STATE_IO_WIDTH value\r
+} CPU_SMM_SAVE_STATE_IO_WIDTH;\r
+\r
+///\r
+/// Variables from SMI Handler\r
+///\r
+extern UINT32 gSmbase;\r
+extern volatile UINT32 gSmiStack;\r
+extern UINT32 gSmiCr3;\r
+extern volatile UINT8 gcSmiHandlerTemplate[];\r
+extern CONST UINT16 gcSmiHandlerSize;\r
+\r
+//\r
+// Variables used by SMI Handler\r
+//\r
+IA32_DESCRIPTOR gSmiHandlerIdtr;\r
+\r
+///\r
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER\r
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+/// NOTE: ranges must be listed in the same order as their entries appear in\r
+/// mSmmCpuWidthOffset[], since GetRegisterIndex() accumulates range lengths\r
+/// to compute the table index. The list is terminated by a zero-length entry.\r
+///\r
+CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE mSmmCpuRegisterRanges[] = {\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_GDTBASE, EFI_SMM_SAVE_STATE_REGISTER_LDTINFO),\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_ES, EFI_SMM_SAVE_STATE_REGISTER_RIP),\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_RFLAGS, EFI_SMM_SAVE_STATE_REGISTER_CR4),\r
+ { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }\r
+};\r
+\r
+///\r
+/// Lookup table used to retrieve the widths and offsets associated with each\r
+/// supported EFI_SMM_SAVE_STATE_REGISTER value\r
+///\r
+/// Indexed by the value returned from GetRegisterIndex(); entry order after\r
+/// the internal indices must match the range order in mSmmCpuRegisterRanges[].\r
+///\r
+CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY mSmmCpuWidthOffset[] = {\r
+ {0, 0, 0, 0, 0, FALSE}, // Reserved\r
+\r
+ //\r
+ // Internally defined CPU Save State Registers. Not defined in PI SMM CPU Protocol.\r
+ //\r
+ {4, 4, SMM_CPU_OFFSET (x86.SMMRevId) , SMM_CPU_OFFSET (x64.SMMRevId) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX = 1\r
+ {4, 4, SMM_CPU_OFFSET (x86.IOMisc) , SMM_CPU_OFFSET (x64.IOMisc) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_IOMISC_INDEX = 2\r
+ {4, 8, SMM_CPU_OFFSET (x86.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) + 4, FALSE}, // SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX = 3\r
+\r
+ //\r
+ // CPU Save State registers defined in PI SMM CPU Protocol.\r
+ //\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.GdtBaseLoDword) , SMM_CPU_OFFSET (x64.GdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE = 4\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.IdtBaseLoDword) , SMM_CPU_OFFSET (x64.IdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE = 5\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.LdtBaseLoDword) , SMM_CPU_OFFSET (x64.LdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE = 6\r
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7\r
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8\r
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9\r
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO = 10\r
+\r
+ {4, 4, SMM_CPU_OFFSET (x86._ES) , SMM_CPU_OFFSET (x64._ES) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_ES = 20\r
+ {4, 4, SMM_CPU_OFFSET (x86._CS) , SMM_CPU_OFFSET (x64._CS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CS = 21\r
+ {4, 4, SMM_CPU_OFFSET (x86._SS) , SMM_CPU_OFFSET (x64._SS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_SS = 22\r
+ {4, 4, SMM_CPU_OFFSET (x86._DS) , SMM_CPU_OFFSET (x64._DS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DS = 23\r
+ {4, 4, SMM_CPU_OFFSET (x86._FS) , SMM_CPU_OFFSET (x64._FS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_FS = 24\r
+ {4, 4, SMM_CPU_OFFSET (x86._GS) , SMM_CPU_OFFSET (x64._GS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GS = 25\r
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._LDTR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26\r
+ {4, 4, SMM_CPU_OFFSET (x86._TR) , SMM_CPU_OFFSET (x64._TR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL = 27\r
+ {4, 8, SMM_CPU_OFFSET (x86._DR7) , SMM_CPU_OFFSET (x64._DR7) , SMM_CPU_OFFSET (x64._DR7) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR7 = 28\r
+ {4, 8, SMM_CPU_OFFSET (x86._DR6) , SMM_CPU_OFFSET (x64._DR6) , SMM_CPU_OFFSET (x64._DR6) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR6 = 29\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R8) , SMM_CPU_OFFSET (x64._R8) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R8 = 30\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R9) , SMM_CPU_OFFSET (x64._R9) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R9 = 31\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R10) , SMM_CPU_OFFSET (x64._R10) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R10 = 32\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R11) , SMM_CPU_OFFSET (x64._R11) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R11 = 33\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R12) , SMM_CPU_OFFSET (x64._R12) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R12 = 34\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R13) , SMM_CPU_OFFSET (x64._R13) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R13 = 35\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R14) , SMM_CPU_OFFSET (x64._R14) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R14 = 36\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R15) , SMM_CPU_OFFSET (x64._R15) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R15 = 37\r
+ {4, 8, SMM_CPU_OFFSET (x86._EAX) , SMM_CPU_OFFSET (x64._RAX) , SMM_CPU_OFFSET (x64._RAX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RAX = 38\r
+ {4, 8, SMM_CPU_OFFSET (x86._EBX) , SMM_CPU_OFFSET (x64._RBX) , SMM_CPU_OFFSET (x64._RBX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBX = 39\r
+ {4, 8, SMM_CPU_OFFSET (x86._ECX) , SMM_CPU_OFFSET (x64._RCX) , SMM_CPU_OFFSET (x64._RCX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RCX = 40\r
+ {4, 8, SMM_CPU_OFFSET (x86._EDX) , SMM_CPU_OFFSET (x64._RDX) , SMM_CPU_OFFSET (x64._RDX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDX = 41\r
+ {4, 8, SMM_CPU_OFFSET (x86._ESP) , SMM_CPU_OFFSET (x64._RSP) , SMM_CPU_OFFSET (x64._RSP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSP = 42\r
+ {4, 8, SMM_CPU_OFFSET (x86._EBP) , SMM_CPU_OFFSET (x64._RBP) , SMM_CPU_OFFSET (x64._RBP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBP = 43\r
+ {4, 8, SMM_CPU_OFFSET (x86._ESI) , SMM_CPU_OFFSET (x64._RSI) , SMM_CPU_OFFSET (x64._RSI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSI = 44\r
+ {4, 8, SMM_CPU_OFFSET (x86._EDI) , SMM_CPU_OFFSET (x64._RDI) , SMM_CPU_OFFSET (x64._RDI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDI = 45\r
+ {4, 8, SMM_CPU_OFFSET (x86._EIP) , SMM_CPU_OFFSET (x64._RIP) , SMM_CPU_OFFSET (x64._RIP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RIP = 46\r
+\r
+ {4, 8, SMM_CPU_OFFSET (x86._EFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51\r
+ {4, 8, SMM_CPU_OFFSET (x86._CR0) , SMM_CPU_OFFSET (x64._CR0) , SMM_CPU_OFFSET (x64._CR0) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR0 = 52\r
+ {4, 8, SMM_CPU_OFFSET (x86._CR3) , SMM_CPU_OFFSET (x64._CR3) , SMM_CPU_OFFSET (x64._CR3) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR3 = 53\r
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._CR4) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR4 = 54\r
+};\r
+\r
+///\r
+/// Lookup table for the IOMisc width information\r
+///\r
+/// Indexed by the 3-bit IOMisc.Bits.Length field; a Width of 0 marks an\r
+/// undefined encoding.\r
+///\r
+CONST CPU_SMM_SAVE_STATE_IO_WIDTH mSmmCpuIoWidth[] = {\r
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 0\r
+ { 1, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // SMM_IO_LENGTH_BYTE = 1\r
+ { 2, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT16 }, // SMM_IO_LENGTH_WORD = 2\r
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 3\r
+ { 4, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT32 }, // SMM_IO_LENGTH_DWORD = 4\r
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 5\r
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 6\r
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 } // Undefined = 7\r
+};\r
+\r
+///\r
+/// Lookup table for the IOMisc type information\r
+///\r
+/// Indexed by the 4-bit IOMisc.Bits.Type field; a value of 0 marks an\r
+/// undefined encoding.\r
+///\r
+CONST EFI_SMM_SAVE_STATE_IO_TYPE mSmmCpuIoType[] = {\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_DX = 0\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_DX = 1\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_OUTS = 2\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_INS = 3\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 4\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 5\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_OUTS = 6\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_INS = 7\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_IMMEDIATE = 8\r
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_IMMEDIATE = 9\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 10\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 11\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 12\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 13\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 14\r
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0 // Undefined = 15\r
+};\r
+\r
+///\r
+/// The mode of the CPU at the time an SMI occurs: holds\r
+/// EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT or the 64-bit equivalent, and\r
+/// selects between the IA32 and X64 save state map layouts below.\r
+///\r
+UINT8 mSmmSaveStateRegisterLma;\r
+\r
+/**\r
+ Convert an EFI_SMM_SAVE_STATE_REGISTER value to an index into the\r
+ mSmmCpuWidthOffset[] lookup table.\r
+\r
+ @param Register Specifies the CPU register to look up in the save state.\r
+\r
+ @retval 0 Register is not valid\r
+ @retval >0 Index into mSmmCpuWidthOffset[] associated with Register\r
+\r
+**/\r
+UINTN\r
+GetRegisterIndex (\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN Offset;\r
+\r
+ //\r
+ // Offset starts past the internal entries (indices 1..3 plus the reserved\r
+ // entry) and accumulates the length of each preceding range, so the range\r
+ // order here must match the entry order in mSmmCpuWidthOffset[].\r
+ //\r
+ for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_MAX_INDEX; mSmmCpuRegisterRanges[Index].Length != 0; Index++) {\r
+ if (Register >= mSmmCpuRegisterRanges[Index].Start && Register <= mSmmCpuRegisterRanges[Index].End) {\r
+ return Register - mSmmCpuRegisterRanges[Index].Start + Offset;\r
+ }\r
+ Offset += mSmmCpuRegisterRanges[Index].Length;\r
+ }\r
+ return 0;\r
+}\r
+\r
+/**\r
+ Read a CPU Save State register on the target processor.\r
+\r
+ This function abstracts the differences that whether the CPU Save State register is in the\r
+ IA32 CPU Save State Map or X64 CPU Save State Map.\r
+\r
+ This function supports reading a CPU Save State register in SMBase relocation handler.\r
+\r
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.\r
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.\r
+ @param[in] Width The number of bytes to read from the CPU save state.\r
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.\r
+\r
+ @retval EFI_SUCCESS The register was read from Save State.\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.\r
+ @retval EFI_INVALID_PARAMETER Width is larger than the width of the register in the current CPU mode.\r
+\r
+**/\r
+EFI_STATUS\r
+ReadSaveStateRegisterByIndex (\r
+ IN UINTN CpuIndex,\r
+ IN UINTN RegisterIndex,\r
+ IN UINTN Width,\r
+ OUT VOID *Buffer\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Index 0 is the reserved entry used by GetRegisterIndex() as its\r
+ // "not found" sentinel.\r
+ //\r
+ if (RegisterIndex == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];\r
+\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write return buffer\r
+ //\r
+ ASSERT(CpuSaveState != NULL);\r
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Width);\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of return buffer\r
+ //\r
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, MIN(4, Width));\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of return buffer\r
+ // (copy length is Width - 4, which is zero when Width == 4)\r
+ //\r
+ CopyMem((UINT8 *)Buffer + 4, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, Width - 4);\r
+ }\r
+ }\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ Read a CPU Save State register on the target processor.\r
+\r
+ This function abstracts the differences that whether the CPU Save State register is in the\r
+ IA32 CPU Save State Map or X64 CPU Save State Map.\r
+\r
+ This function supports reading a CPU Save State register in SMBase relocation handler.\r
+\r
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.\r
+ @param[in] Register The EFI_SMM_SAVE_STATE_REGISTER value to read.\r
+ @param[in] Width The number of bytes to read from the CPU save state.\r
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.\r
+\r
+ @retval EFI_SUCCESS The register was read from Save State.\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.\r
+ @retval EFI_INVALID_PARAMETER Width does not match the register being read.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+ReadSaveStateRegister (\r
+ IN UINTN CpuIndex,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN Width,\r
+ OUT VOID *Buffer\r
+ )\r
+{\r
+ UINT32 SmmRevId;\r
+ SMRAM_SAVE_STATE_IOMISC IoMisc;\r
+ EFI_SMM_SAVE_STATE_IO_INFO *IoInfo;\r
+ VOID *IoMemAddr;\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ //\r
+ // Only byte access is supported for this register\r
+ //\r
+ if (Width != 1) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ *(UINT8 *)Buffer = mSmmSaveStateRegisterLma;\r
+\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ //\r
+ // Get SMM Revision ID\r
+ //\r
+ ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX, sizeof(SmmRevId), &SmmRevId);\r
+\r
+ //\r
+ // See if the CPU supports the IOMisc register in the save state\r
+ //\r
+ if (SmmRevId < SMRAM_SAVE_STATE_MIN_REV_ID_IOMISC) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Get the IOMisc register value\r
+ //\r
+ ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMISC_INDEX, sizeof(IoMisc.Uint32), &IoMisc.Uint32);\r
+\r
+ //\r
+ // Check for the SMI_FLAG in IOMisc\r
+ //\r
+ if (IoMisc.Bits.SmiFlag == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Compute index for the I/O Length and I/O Type lookup tables\r
+ //\r
+ if (mSmmCpuIoWidth[IoMisc.Bits.Length].Width == 0 || mSmmCpuIoType[IoMisc.Bits.Type] == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Zero the IoInfo structure that will be returned in Buffer\r
+ //\r
+ IoInfo = (EFI_SMM_SAVE_STATE_IO_INFO *)Buffer;\r
+ ZeroMem (IoInfo, sizeof(EFI_SMM_SAVE_STATE_IO_INFO));\r
+\r
+ //\r
+ // Use lookup tables to help fill in all the fields of the IoInfo structure\r
+ //\r
+ IoInfo->IoPort = (UINT16)IoMisc.Bits.Port;\r
+ IoInfo->IoWidth = mSmmCpuIoWidth[IoMisc.Bits.Length].IoWidth;\r
+ IoInfo->IoType = mSmmCpuIoType[IoMisc.Bits.Type];\r
+ if (IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_INPUT || IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT) {\r
+ //\r
+ // Plain IN/OUT: the I/O data is in RAX.\r
+ //\r
+ ReadSaveStateRegister (CpuIndex, EFI_SMM_SAVE_STATE_REGISTER_RAX, mSmmCpuIoWidth[IoMisc.Bits.Length].Width, &IoInfo->IoData);\r
+ }\r
+ else {\r
+ //\r
+ // String/REP I/O: IOMemAddr holds the memory address of the I/O data.\r
+ // sizeof(IoMemAddr) matches the register's width for the current mode\r
+ // (4 bytes on IA32 builds, 8 bytes on X64 builds).\r
+ //\r
+ ReadSaveStateRegisterByIndex(CpuIndex, SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX, sizeof(IoMemAddr), &IoMemAddr);\r
+ CopyMem(&IoInfo->IoData, IoMemAddr, mSmmCpuIoWidth[IoMisc.Bits.Length].Width);\r
+ }\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index\r
+ //\r
+ return ReadSaveStateRegisterByIndex (CpuIndex, GetRegisterIndex (Register), Width, Buffer);\r
+}\r
+\r
+/**\r
+ Write value to a CPU Save State register on the target processor.\r
+\r
+ This function abstracts the differences that whether the CPU Save State register is in the\r
+ IA32 CPU Save State Map or X64 CPU Save State Map.\r
+\r
+ This function supports writing a CPU Save State register in SMBase relocation handler.\r
+\r
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.\r
+ @param[in] Register The EFI_SMM_SAVE_STATE_REGISTER value to write.\r
+ @param[in] Width The number of bytes to write to the CPU save state.\r
+ @param[in] Buffer Upon entry, this holds the new CPU register value.\r
+\r
+ @retval EFI_SUCCESS The register was written to Save State.\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.\r
+ @retval EFI_INVALID_PARAMETER Width is larger than the width of the register in the current CPU mode.\r
+ @retval EFI_UNSUPPORTED The register is read-only and cannot be written.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+WriteSaveStateRegister (\r
+ IN UINTN CpuIndex,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN Width,\r
+ IN CONST VOID *Buffer\r
+ )\r
+{\r
+ UINTN RegisterIndex;\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index\r
+ //\r
+ RegisterIndex = GetRegisterIndex (Register);\r
+ if (RegisterIndex == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];\r
+\r
+ //\r
+ // Do not write non-writable SaveState, because it will cause exception.\r
+ //\r
+ if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {\r
+ return EFI_UNSUPPORTED;\r
+ }\r
+\r
+ //\r
+ // Check CPU mode\r
+ //\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+ //\r
+ // Write SMM State register\r
+ //\r
+ ASSERT (CpuSaveState != NULL);\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Buffer, Width);\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of SMM State register\r
+ //\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, Buffer, MIN (4, Width));\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of SMM State register\r
+ // (copy length is Width - 4, which is zero when Width == 4)\r
+ //\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, (UINT8 *)Buffer + 4, Width - 4);\r
+ }\r
+ }\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ Hook the code executed immediately after an RSM instruction on the currently\r
+ executing CPU. The mode of code executed immediately after RSM must be\r
+ detected, and the appropriate hook must be selected. Always clear the auto\r
+ HALT restart flag if it is set.\r
+\r
+ @param[in] CpuIndex The processor index for the currently\r
+ executing CPU.\r
+ @param[in] CpuState Pointer to SMRAM Save State Map for the\r
+ currently executing CPU.\r
+ @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to\r
+ 32-bit mode from 64-bit SMM.\r
+ @param[in] NewInstructionPointer Instruction pointer to use if resuming to\r
+ same mode as SMM.\r
+\r
+ @retval The value of the original instruction pointer before it was hooked.\r
+\r
+**/\r
+UINT64\r
+EFIAPI\r
+HookReturnFromSmm (\r
+ IN UINTN CpuIndex,\r
+ SMRAM_SAVE_STATE_MAP *CpuState,\r
+ UINT64 NewInstructionPointer32,\r
+ UINT64 NewInstructionPointer\r
+ )\r
+{\r
+ UINT64 OriginalInstructionPointer;\r
+\r
+ //\r
+ // Give the CPU features library the first chance to install the hook;\r
+ // a non-zero return value means it handled the hook itself.\r
+ //\r
+ OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (\r
+ CpuIndex,\r
+ CpuState,\r
+ NewInstructionPointer32,\r
+ NewInstructionPointer\r
+ );\r
+ if (OriginalInstructionPointer != 0) {\r
+ return OriginalInstructionPointer;\r
+ }\r
+\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;\r
+ CpuState->x86._EIP = (UINT32)NewInstructionPointer;\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {\r
+ CpuState->x86.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ } else {\r
+ OriginalInstructionPointer = CpuState->x64._RIP;\r
+ if ((CpuState->x64.IA32_EFER & LMA) == 0) {\r
+ CpuState->x64._RIP = (UINT32)NewInstructionPointer32;\r
+ } else {\r
+ //\r
+ // Resuming to 64-bit mode: _RIP is a 64-bit field, so the full\r
+ // 64-bit instruction pointer must be stored without truncation.\r
+ // (A (UINT32) cast here would corrupt hook addresses above 4GB.)\r
+ //\r
+ CpuState->x64._RIP = NewInstructionPointer;\r
+ }\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {\r
+ CpuState->x64.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ }\r
+ return OriginalInstructionPointer;\r
+}\r
+\r
+/**\r
+ Get the size of the SMI Handler in bytes.\r
+\r
+ @retval The size, in bytes, of the SMI Handler.\r
+\r
+**/\r
+UINTN\r
+EFIAPI\r
+GetSmiHandlerSize (\r
+ VOID\r
+ )\r
+{\r
+ UINTN CustomSize;\r
+\r
+ //\r
+ // A non-zero size from the CPU features library overrides the size of\r
+ // the built-in assembly handler template.\r
+ //\r
+ CustomSize = SmmCpuFeaturesGetSmiHandlerSize ();\r
+ return (CustomSize != 0) ? CustomSize : gcSmiHandlerSize;\r
+}\r
+\r
+/**\r
+ Install the SMI handler for the CPU specified by CpuIndex. This function\r
+ is called by the CPU that was elected as monarch during System Management\r
+ Mode initialization.\r
+\r
+ @param[in] CpuIndex The index of the CPU to install the custom SMI handler.\r
+ The value must be between 0 and the NumberOfCpus field\r
+ in the System Management System Table (SMST).\r
+ @param[in] SmBase The SMBASE address for the CPU specified by CpuIndex.\r
+ @param[in] SmiStack The stack to use when an SMI is processed by the\r
+ the CPU specified by CpuIndex.\r
+ @param[in] StackSize The size, in bytes, of the stack used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] GdtBase The base address of the GDT to use when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] GdtSize The size, in bytes, of the GDT used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] IdtBase The base address of the IDT to use when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] IdtSize The size, in bytes, of the IDT used when an SMI is\r
+ processed by the CPU specified by CpuIndex.\r
+ @param[in] Cr3 The base address of the page tables to use when an SMI\r
+ is processed by the CPU specified by CpuIndex.\r
+**/\r
+VOID\r
+EFIAPI\r
+InstallSmiHandler (\r
+ IN UINTN CpuIndex,\r
+ IN UINT32 SmBase,\r
+ IN VOID *SmiStack,\r
+ IN UINTN StackSize,\r
+ IN UINTN GdtBase,\r
+ IN UINTN GdtSize,\r
+ IN UINTN IdtBase,\r
+ IN UINTN IdtSize,\r
+ IN UINT32 Cr3\r
+ )\r
+{\r
+ if (SmmCpuFeaturesGetSmiHandlerSize () != 0) {\r
+ //\r
+ // Install SMI handler provided by library\r
+ //\r
+ SmmCpuFeaturesInstallSmiHandler (\r
+ CpuIndex,\r
+ SmBase,\r
+ SmiStack,\r
+ StackSize,\r
+ GdtBase,\r
+ GdtSize,\r
+ IdtBase,\r
+ IdtSize,\r
+ Cr3\r
+ );\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Initialize values in template before copy.\r
+ // gSmiStack/gSmiCr3/gSmbase/gSmiHandlerIdtr are the globals shared with\r
+ // the assembly SMI handler (see the externs for gcSmiHandlerTemplate).\r
+ //\r
+ gSmiStack = (UINT32)((UINTN)SmiStack + StackSize - sizeof (UINTN));\r
+ gSmiCr3 = Cr3;\r
+ gSmbase = SmBase;\r
+ gSmiHandlerIdtr.Base = IdtBase;\r
+ gSmiHandlerIdtr.Limit = (UINT16)(IdtSize - 1);\r
+\r
+ //\r
+ // Set the value at the top of the CPU stack to the CPU Index\r
+ //\r
+ *(UINTN*)(UINTN)gSmiStack = CpuIndex;\r
+\r
+ //\r
+ // Copy template to CPU specific SMI handler location\r
+ //\r
+ CopyMem (\r
+ (VOID*)(UINTN)(SmBase + SMM_HANDLER_OFFSET),\r
+ (VOID*)gcSmiHandlerTemplate,\r
+ gcSmiHandlerSize\r
+ );\r
+}\r
--- /dev/null
+/** @file\r
+SMM Timer feature support\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+//\r
+// AP sync timeout, expressed in performance counter ticks\r
+// (computed by InitializeSmmTimer from PcdCpuSmmApSyncTimeout).\r
+//\r
+UINT64 mTimeoutTicker = 0;\r
+//\r
+// Number of counts in a roll-over cycle of the performance counter.\r
+//\r
+UINT64 mCycle = 0;\r
+//\r
+// Flag to indicate the performance counter is count-up or count-down.\r
+//\r
+BOOLEAN mCountDown;\r
+\r
+/**\r
+ Initialize Timer for SMM AP Sync.\r
+\r
+**/\r
+VOID\r
+InitializeSmmTimer (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 TimerFrequency;\r
+ UINT64 Start;\r
+ UINT64 End;\r
+\r
+ TimerFrequency = GetPerformanceCounterProperties (&Start, &End);\r
+ //\r
+ // Convert the timeout from microseconds (PcdCpuSmmApSyncTimeout) to\r
+ // performance counter ticks: Ticks = Frequency * Timeout / 1,000,000.\r
+ //\r
+ mTimeoutTicker = DivU64x32 (\r
+ MultU64x64(TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout)),\r
+ 1000 * 1000\r
+ );\r
+ //\r
+ // Record counting direction and roll-over cycle length for\r
+ // IsSyncTimerTimeout(): End < Start means a descending counter.\r
+ //\r
+ if (End < Start) {\r
+ mCountDown = TRUE;\r
+ mCycle = Start - End;\r
+ } else {\r
+ mCountDown = FALSE;\r
+ mCycle = End - Start;\r
+ }\r
+}\r
+\r
+/**\r
+ Start Timer for SMM AP Sync.\r
+\r
+ @return The current performance counter value, to be passed to\r
+ IsSyncTimerTimeout() as the start of the timing interval.\r
+\r
+**/\r
+UINT64\r
+EFIAPI\r
+StartSyncTimer (\r
+ VOID\r
+ )\r
+{\r
+ return GetPerformanceCounter ();\r
+}\r
+\r
+\r
+/**\r
+ Check if the SMM AP Sync timer is timeout.\r
+\r
+ @param Timer The start timer from the begin.\r
+\r
+ @retval TRUE The timeout period (mTimeoutTicker) has elapsed since Timer.\r
+ @retval FALSE The timeout period has not yet elapsed.\r
+\r
+**/\r
+BOOLEAN\r
+EFIAPI\r
+IsSyncTimerTimeout (\r
+ IN UINT64 Timer\r
+ )\r
+{\r
+ UINT64 CurrentTimer;\r
+ UINT64 Delta;\r
+\r
+ CurrentTimer = GetPerformanceCounter ();\r
+ //\r
+ // We need to consider the case that CurrentTimer is equal to Timer\r
+ // when some timer runs too slow and CPU runs fast. We think roll over\r
+ // condition does not happen on this case.\r
+ //\r
+ if (mCountDown) {\r
+ //\r
+ // The performance counter counts down. Check for roll over condition.\r
+ //\r
+ if (CurrentTimer <= Timer) {\r
+ Delta = Timer - CurrentTimer;\r
+ } else {\r
+ //\r
+ // Handle one roll-over.\r
+ //\r
+ Delta = mCycle - (CurrentTimer - Timer) + 1;\r
+ }\r
+ } else {\r
+ //\r
+ // The performance counter counts up. Check for roll over condition.\r
+ //\r
+ if (CurrentTimer >= Timer) {\r
+ Delta = CurrentTimer - Timer;\r
+ } else {\r
+ //\r
+ // Handle one roll-over.\r
+ //\r
+ Delta = mCycle - (Timer - CurrentTimer) + 1;\r
+ }\r
+ }\r
+\r
+ return (BOOLEAN) (Delta >= mTimeoutTicker);\r
+}\r
## @libraryclass Provides platform specific initialization functions in the SEC phase.\r
##\r
PlatformSecLib|Include/Library/PlatformSecLib.h\r
- \r
+\r
## @libraryclass Public include file for the SMM CPU Platform Hook Library.\r
##\r
SmmCpuPlatformHookLib|Include/Library/SmmCpuPlatformHookLib.h\r
- \r
+\r
## @libraryclass Provides the CPU specific programming for PiSmmCpuDxeSmm module.\r
##\r
SmmCpuFeaturesLib|Include/Library/SmmCpuFeaturesLib.h\r
[Protocols]\r
## Include/Protocol/SmmCpuService.h\r
gEfiSmmCpuServiceProtocolGuid = { 0x1d202cab, 0xc8ab, 0x4d5c, { 0x94, 0xf7, 0x3c, 0xfc, 0xc0, 0xd3, 0xd3, 0x35 }}\r
- \r
+\r
#\r
# [Error.gUefiCpuPkgTokenSpaceGuid]\r
# 0x80000001 | Invalid value provided.\r
#\r
\r
+[PcdsFeatureFlag]\r
+ ## Indicates if SMM Profile will be enabled.\r
+ # If enabled, instruction executions in and data accesses to memory outside of SMRAM will be logged.\r
+ # This PCD is only for validation purpose. It should be set to false in production.<BR><BR>\r
+ # TRUE - SMM Profile will be enabled.<BR>\r
+ # FALSE - SMM Profile will be disabled.<BR>\r
+ # @Prompt Enable SMM Profile.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileEnable|FALSE|BOOLEAN|0x32132109\r
+\r
+ ## Indicates if the SMM profile log buffer is a ring buffer.\r
+ # If disabled, no additional log can be done when the buffer is full.<BR><BR>\r
+ # TRUE - the SMM profile log buffer is a ring buffer.<BR>\r
+ # FALSE - the SMM profile log buffer is a normal buffer.<BR>\r
+ # @Prompt The SMM profile log buffer is a ring buffer.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileRingBuffer|FALSE|BOOLEAN|0x3213210a\r
+\r
+ ## Indicates if SMM Startup AP in a blocking fashion.\r
+ # TRUE - SMM Startup AP in a blocking fashion.<BR>\r
+ # FALSE - SMM Startup AP in a non-blocking fashion.<BR>\r
+ # @Prompt SMM Startup AP in a blocking fashion.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp|FALSE|BOOLEAN|0x32132108\r
+\r
+ ## Indicates if SMM Stack Guard will be enabled.\r
+ # If enabled, stack overflow in SMM can be caught which eases debugging.<BR><BR>\r
+ # TRUE - SMM Stack Guard will be enabled.<BR>\r
+ # FALSE - SMM Stack Guard will be disabled.<BR>\r
+ # @Prompt Enable SMM Stack Guard.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard|FALSE|BOOLEAN|0x1000001C\r
+\r
+ ## Indicates if BSP election in SMM will be enabled.\r
+ # If enabled, a BSP will be dynamically elected among all processors in each SMI.\r
+ # Otherwise, processor 0 is always as BSP in each SMI.<BR><BR>\r
+ # TRUE - BSP election in SMM will be enabled.<BR>\r
+ # FALSE - BSP election in SMM will be disabled.<BR>\r
+ # @Prompt Enable BSP election in SMM.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmEnableBspElection|TRUE|BOOLEAN|0x32132106\r
+\r
+ ## Indicates if CPU SMM hot-plug will be enabled.<BR><BR>\r
+ # TRUE - SMM CPU hot-plug will be enabled.<BR>\r
+ # FALSE - SMM CPU hot-plug will be disabled.<BR>\r
+ # @Prompt SMM CPU hot-plug.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugSupport|FALSE|BOOLEAN|0x3213210C\r
+\r
+ ## Indicates if SMM Debug will be enabled.\r
+ # If enabled, hardware breakpoints in SMRAM can be set outside of SMM mode and take effect in SMM.<BR><BR>\r
+ # TRUE - SMM Debug will be enabled.<BR>\r
+ # FALSE - SMM Debug will be disabled.<BR>\r
+ # @Prompt Enable SMM Debug.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug|FALSE|BOOLEAN|0x1000001B\r
+\r
+ ## Indicates if lock SMM Feature Control MSR.<BR><BR>\r
+ # TRUE - SMM Feature Control MSR will be locked.<BR>\r
+ # FALSE - SMM Feature Control MSR will not be locked.<BR>\r
+ # @Prompt Lock SMM Feature Control MSR.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock|TRUE|BOOLEAN|0x3213210B\r
+\r
[PcdsFixedAtBuild, PcdsPatchableInModule]\r
- ## This value is the CPU Local Apic base address, which aligns the address on a 4-KByte boundary.\r
- # @Prompt Configure base address of CPU Local Apic\r
+ ## This value is the CPU Local APIC base address, which aligns the address on a 4-KByte boundary.\r
+ # @Prompt Configure base address of CPU Local APIC\r
# @Expression 0x80000001 | (gUefiCpuPkgTokenSpaceGuid.PcdCpuLocalApicBaseAddress & 0xfff) == 0\r
gUefiCpuPkgTokenSpaceGuid.PcdCpuLocalApicBaseAddress|0xfee00000|UINT32|0x00000001\r
+\r
## Specifies delay value in microseconds after sending out an INIT IPI.\r
# @Prompt Configure delay value after send an INIT IPI\r
gUefiCpuPkgTokenSpaceGuid.PcdCpuInitIpiDelayInMicroSeconds|10000|UINT32|0x30000002\r
+\r
## Specifies max supported number of Logical Processors.\r
- # @Prompt Configure max supported number of Logical Processorss\r
+ # @Prompt Configure max supported number of Logical Processors\r
gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber|64|UINT32|0x00000002\r
+\r
 ## Specifies stack size in the temporary RAM. 0 means half of TemporaryRamSize.\r
 # @Prompt Stack size in the temporary RAM.\r
gUefiCpuPkgTokenSpaceGuid.PcdPeiTemporaryRamStackSize|0|UINT32|0x10001003\r
\r
+ ## Specifies buffer size in bytes to save SMM profile data. The value should be a multiple of 4KB.\r
+ # @Prompt SMM profile data buffer size.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize|0x200000|UINT32|0x32132107\r
+\r
+ ## Specifies stack size in bytes for each processor in SMM.\r
+ # @Prompt Processor stack size in SMM.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize|0x2000|UINT32|0x32132105\r
+\r
+ ## Specifies timeout value in microseconds for the BSP in SMM to wait for all APs to come into SMM.\r
+ # @Prompt AP synchronization timeout value in SMM.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|1000000|UINT64|0x32132104\r
+\r
+ ## Indicates if SMM Code Access Check is enabled.\r
+ # If enabled, the SMM handler cannot execute code outside SMM regions.\r
+ # It is strongly recommended to set this PCD to TRUE in production images.<BR><BR>\r
+ # TRUE - SMM Code Access Check will be enabled.<BR>\r
+ # FALSE - SMM Code Access Check will be disabled.<BR>\r
+ # @Prompt SMM Code Access Check.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable|TRUE|BOOLEAN|0x60000013\r
+\r
+ ## Indicates the CPU synchronization method used when processing an SMI.\r
+ # 0x00 - Traditional CPU synchronization method.<BR>\r
+ # 0x01 - Relaxed CPU synchronization method.<BR>\r
+ # @Prompt SMM CPU Synchronization Method.\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode|0x00|UINT8|0x60000014\r
+\r
[PcdsFixedAtBuild, PcdsPatchableInModule, PcdsDynamic, PcdsDynamicEx]\r
 ## Specifies the size of the microcode Region.\r
 # @Prompt Microcode Region size.\r
gUefiCpuPkgTokenSpaceGuid.PcdCpuMicrocodePatchRegionSize|0x0|UINT64|0x00000006\r
\r
+[PcdsDynamic, PcdsDynamicEx]\r
+ ## Contains the pointer to a CPU S3 data buffer of structure ACPI_CPU_DATA.\r
+ # @Prompt The pointer to a CPU S3 data buffer.\r
+ # @ValidList 0x80000001 | 0\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress|0x0|UINT64|0x60000010\r
+\r
+ ## Contains the pointer to a CPU Hot Plug Data structure if CPU hot-plug is supported.\r
+ # @Prompt The pointer to CPU Hot Plug Data.\r
+ # @ValidList 0x80000001 | 0\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress|0x0|UINT64|0x60000011\r
+\r
[UserExtensions.TianoCore."ExtraFiles"]\r
UefiCpuPkgExtra.uni\r