/** @file\r
Enable SMM profile.\r
\r
-Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2012 - 2017, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
+\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
//\r
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;\r
\r
-//\r
-// The flag indicates if execute-disable is supported by processor.\r
-//\r
-BOOLEAN mXdSupported = FALSE;\r
-\r
//\r
// The flag indicates if execute-disable is enabled on processor.\r
//\r
//\r
// The flag indicates if BTS is supported by processor.\r
//\r
-BOOLEAN mBtsSupported = FALSE;\r
+BOOLEAN mBtsSupported = TRUE;\r
\r
//\r
// The flag indicates if SMM profile starts to record data.\r
//\r
{{0x00000000, 0x00000000},TRUE,TRUE},\r
\r
+ //\r
+  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed at runtime).
+ // It is always present and instruction fetches are allowed.\r
+ // {{0x00000000, 0x00000000},TRUE,FALSE},\r
+ //\r
+\r
//\r
// Future extended range could be added here.\r
//\r
\r
ApicId = GetApicId ();\r
\r
- for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {\r
return Index;\r
}\r
ClearTrapFlag (SystemContext);\r
}\r
\r
+/**\r
+ Check if the input address is in SMM ranges.\r
+\r
+ @param[in] Address The input address.\r
+\r
+ @retval TRUE The input address is in SMM.\r
+ @retval FALSE The input address is not in SMM.\r
+**/\r
+BOOLEAN\r
+IsInSmmRanges (\r
+ IN EFI_PHYSICAL_ADDRESS Address\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
+ return TRUE;\r
+ }\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&\r
+ Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {\r
+ return TRUE;\r
+ }\r
+ }\r
+ return FALSE;\r
+}\r
+\r
/**\r
Check if the memory address will be mapped by 4KB-page.\r
\r
{\r
UINTN Index;\r
\r
- *Nx = FALSE;\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
//\r
// Check configuration\r
return FALSE;\r
\r
} else {\r
- if ((Address < mCpuHotPlugData.SmrrBase) ||\r
- (Address >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
- *Nx = TRUE;\r
+ *Nx = TRUE;\r
+ if (IsInSmmRanges (Address)) {\r
+ *Nx = FALSE;\r
}\r
return TRUE;\r
}\r
{\r
UINTN Index;\r
UINTN NumberOfDescriptors;\r
- UINTN NumberOfMmioDescriptors;\r
+ UINTN NumberOfAddedDescriptors;\r
UINTN NumberOfProtectRange;\r
UINTN NumberOfSpliteRange;\r
EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;\r
UINT64 Low4KBPageSize;\r
\r
NumberOfDescriptors = 0;\r
- NumberOfMmioDescriptors = 0;\r
+ NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;\r
NumberOfSpliteRange = 0;\r
MemorySpaceMap = NULL;\r
\r
);\r
for (Index = 0; Index < NumberOfDescriptors; Index++) {\r
if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {\r
- NumberOfMmioDescriptors++;\r
+ NumberOfAddedDescriptors++;\r
}\r
}\r
\r
- if (NumberOfMmioDescriptors != 0) {\r
- TotalSize = NumberOfMmioDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);\r
+ if (NumberOfAddedDescriptors != 0) {\r
+ TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);\r
mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);\r
ASSERT (mProtectionMemRange != NULL);\r
mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);\r
mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);\r
ASSERT (mSplitMemRange != NULL);\r
\r
+ //\r
+ // Create SMM ranges which are set to present and execution-enable.\r
+ //\r
+ NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&\r
+ mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {\r
+ //\r
+      // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
+ //\r
+ break;\r
+ }\r
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;\r
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;\r
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;\r
+ mProtectionMemRange[NumberOfProtectRange].Nx = FALSE;\r
+ NumberOfProtectRange++;\r
+ }\r
+\r
//\r
// Create MMIO ranges which are set to present and execution-disable.\r
//\r
- NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);\r
for (Index = 0; Index < NumberOfDescriptors; Index++) {\r
if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {\r
continue;\r
mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;\r
NumberOfProtectRange++;\r
}\r
+\r
+ //\r
+  // Check and update the actual protected memory ranges count
+ //\r
+ ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);\r
+ mProtectionMemRangeCount = NumberOfProtectRange;\r
}\r
\r
//\r
//\r
continue;\r
}\r
- Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
} else {\r
Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
}\r
//\r
continue;\r
}\r
- Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
+ if ((*Pde & IA32_PG_PS) != 0) {\r
+ //\r
+ // This is 1G entry, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
if (Pte == 0) {\r
continue;\r
}\r
\r
// Split it\r
for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {\r
- Pt[Level4] = Address + ((Level4 << 12) | PAGE_ATTRIBUTE_BITS);\r
+ Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
} // end for PT\r
- *Pte = (UINTN)Pt | PAGE_ATTRIBUTE_BITS;\r
+ *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
} // end if IsAddressSplit\r
} // end for PTE\r
} // end for PDE\r
//\r
continue;\r
}\r
- Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
} else {\r
Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
}\r
//\r
continue;\r
}\r
- Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
+ if ((*Pde & IA32_PG_PS) != 0) {\r
+ //\r
+ // This is 1G entry, set NX bit and skip it\r
+ //\r
+ if (mXdSupported) {\r
+ *Pde = *Pde | IA32_PG_NX;\r
+ }\r
+ continue;\r
+ }\r
+ Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
if (Pte == 0) {\r
continue;\r
}\r
}\r
} else {\r
// 4KB page\r
- Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK);\r
+ Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
if (Pt == 0) {\r
continue;\r
}\r
UINTN MsrDsAreaSizePerCpu;\r
UINTN TotalSize;\r
\r
- mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);\r
ASSERT (mPFEntryCount != NULL);\r
mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (\r
- sizeof (mLastPFEntryValue[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);\r
ASSERT (mLastPFEntryValue != NULL);\r
mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (\r
- sizeof (mLastPFEntryPointer[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);\r
ASSERT (mLastPFEntryPointer != NULL);\r
\r
//\r
mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
\r
if (mBtsSupported) {\r
- mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);\r
ASSERT (mMsrDsArea != NULL);\r
- mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);\r
ASSERT (mMsrBTSRecord != NULL);\r
- mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);\r
ASSERT (mMsrPEBSRecord != NULL);\r
\r
mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);\r
- MsrDsAreaSizePerCpu = mMsrDsAreaSize / PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
+ MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;\r
mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);\r
- for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);\r
mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));\r
mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);\r
/**\r
Check if XD feature is supported by a processor.\r
\r
- @param[in,out] Buffer The pointer to private data buffer.\r
-\r
**/\r
VOID\r
-EFIAPI\r
CheckFeatureSupported (\r
- IN OUT VOID *Buffer\r
+ VOID\r
)\r
{\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+ MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;\r
\r
if (mXdSupported) {\r
AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
// BTINT bits in the MSR_DEBUGCTLA MSR.\r
// 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.\r
//\r
- if (AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 11, 11) == 1) {\r
+ MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
+ if (MiscEnableMsr.Bits.BTS == 1) {\r
//\r
- // BTS facilities is not supported if MSR_IA32_MISC_ENABLE BIT11 is set.\r
+      // The BTS facility is not supported if the MSR_IA32_MISC_ENABLE.BTS bit is set.
//\r
mBtsSupported = FALSE;\r
}\r
}\r
}\r
\r
-/**\r
- Check if XD and BTS features are supported by all processors.\r
-\r
-**/\r
-VOID\r
-CheckProcessorFeature (\r
- VOID\r
- )\r
-{\r
- EFI_STATUS Status;\r
- EFI_MP_SERVICES_PROTOCOL *MpServices;\r
-\r
- Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
- ASSERT_EFI_ERROR (Status);\r
-\r
- //\r
- // First detect if XD and BTS are supported\r
- //\r
- mXdSupported = TRUE;\r
- mBtsSupported = TRUE;\r
-\r
- //\r
- // Check if XD and BTS are supported on all processors.\r
- //\r
- CheckFeatureSupported (NULL);\r
-\r
- //\r
- //Check on other processors if BSP supports this\r
- //\r
- if (mXdSupported || mBtsSupported) {\r
- MpServices->StartupAllAPs (\r
- MpServices,\r
- CheckFeatureSupported,\r
- TRUE,\r
- NULL,\r
- 0,\r
- NULL,\r
- NULL\r
- );\r
- }\r
-}\r
-\r
-/**\r
- Enable XD feature.\r
-\r
-**/\r
-VOID\r
-ActivateXd (\r
- VOID\r
- )\r
-{\r
- UINT64 MsrRegisters;\r
-\r
- MsrRegisters = AsmReadMsr64 (MSR_EFER);\r
- if ((MsrRegisters & MSR_EFER_XD) != 0) {\r
- return ;\r
- }\r
- MsrRegisters |= MSR_EFER_XD;\r
- AsmWriteMsr64 (MSR_EFER, MsrRegisters);\r
-}\r
-\r
/**\r
Enable single step.\r
\r
VOID\r
)\r
{\r
- SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);\r
+ EFI_STATUS Status;\r
+\r
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);\r
+ ASSERT_EFI_ERROR (Status);\r
}\r