/** @file\r
Enable SMM profile.\r
\r
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
#include "PiSmmCpuDxeSmm.h"\r
#include "SmmProfileInternal.h"\r
\r
-UINT32 mSmmProfileCr3;\r
+UINT32 mSmmProfileCr3;\r
\r
-SMM_PROFILE_HEADER *mSmmProfileBase;\r
-MSR_DS_AREA_STRUCT *mMsrDsAreaBase;\r
+SMM_PROFILE_HEADER *mSmmProfileBase;\r
+MSR_DS_AREA_STRUCT *mMsrDsAreaBase;\r
//\r
// The buffer to store SMM profile data.\r
//\r
-UINTN mSmmProfileSize;\r
+UINTN mSmmProfileSize;\r
\r
//\r
// The buffer to enable branch trace store.\r
//\r
-UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;\r
+UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;\r
\r
//\r
// The flag indicates if execute-disable is supported by processor.\r
//\r
-BOOLEAN mXdSupported = FALSE;\r
+BOOLEAN mXdSupported = TRUE;\r
\r
//\r
// The flag indicates if execute-disable is enabled on processor.\r
//\r
-BOOLEAN mXdEnabled = FALSE;\r
+BOOLEAN mXdEnabled = FALSE;\r
\r
//\r
// The flag indicates if BTS is supported by processor.\r
//\r
-BOOLEAN mBtsSupported = FALSE;\r
+BOOLEAN mBtsSupported = TRUE;\r
\r
//\r
// The flag indicates if SMM profile starts to record data.\r
//\r
-BOOLEAN mSmmProfileStart = FALSE;\r
+BOOLEAN mSmmProfileStart = FALSE;\r
+\r
+//\r
+// The flag indicates if #DB will be setup in #PF handler.\r
+//\r
+BOOLEAN mSetupDebugTrap = FALSE;\r
\r
//\r
// Record the page fault exception count for one instruction execution.\r
//\r
-UINTN *mPFEntryCount;\r
+UINTN *mPFEntryCount;\r
\r
-UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];\r
+UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];\r
UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];\r
\r
-MSR_DS_AREA_STRUCT **mMsrDsArea;\r
-BRANCH_TRACE_RECORD **mMsrBTSRecord;\r
-UINTN mBTSRecordNumber;\r
-PEBS_RECORD **mMsrPEBSRecord;\r
+MSR_DS_AREA_STRUCT **mMsrDsArea;\r
+BRANCH_TRACE_RECORD **mMsrBTSRecord;\r
+UINTN mBTSRecordNumber;\r
+PEBS_RECORD **mMsrPEBSRecord;\r
\r
//\r
// These memory ranges are always present, they does not generate the access type of page fault exception,\r
// but they possibly generate instruction fetch type of page fault exception.\r
//\r
-MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;\r
-UINTN mProtectionMemRangeCount = 0;\r
+MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;\r
+UINTN mProtectionMemRangeCount = 0;\r
\r
//\r
// Some predefined memory ranges.\r
//\r
-MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {\r
+MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {\r
//\r
// SMRAM range (to be fixed in runtime).\r
// It is always present and instruction fetches are allowed.\r
//\r
- {{0x00000000, 0x00000000},TRUE,FALSE},\r
+ {\r
+ { 0x00000000, 0x00000000 }, TRUE, FALSE\r
+ },\r
\r
//\r
// SMM profile data range( to be fixed in runtime).\r
// It is always present and instruction fetches are not allowed.\r
//\r
- {{0x00000000, 0x00000000},TRUE,TRUE},\r
+ {\r
+ { 0x00000000, 0x00000000 }, TRUE, TRUE\r
+ },\r
+\r
+ //\r
+  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).\r
+ // It is always present and instruction fetches are allowed.\r
+ // {{0x00000000, 0x00000000},TRUE,FALSE},\r
+ //\r
\r
//\r
// Future extended range could be added here.\r
//\r
// These memory ranges are mapped by 4KB-page instead of 2MB-page.\r
//\r
-MEMORY_RANGE *mSplitMemRange = NULL;\r
-UINTN mSplitMemRangeCount = 0;\r
+MEMORY_RANGE *mSplitMemRange = NULL;\r
+UINTN mSplitMemRangeCount = 0;\r
\r
//\r
// SMI command port.\r
//\r
-UINT32 mSmiCommandPort;\r
+UINT32 mSmiCommandPort;\r
\r
/**\r
Disable branch trace store.\r
VOID\r
)\r
{\r
- UINTN Index;\r
- UINT32 ApicId;\r
+ UINTN Index;\r
+ UINT32 ApicId;\r
\r
ApicId = GetApicId ();\r
\r
- for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {\r
return Index;\r
}\r
}\r
+\r
ASSERT (FALSE);\r
return 0;\r
}\r
**/\r
UINT64\r
GetSourceFromDestinationOnBts (\r
- UINTN CpuIndex,\r
- UINT64 DestinationIP\r
+ UINTN CpuIndex,\r
+ UINT64 DestinationIP\r
)\r
{\r
BRANCH_TRACE_RECORD *CurrentBTSRecord;\r
// Underflow\r
//\r
CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);\r
- CurrentBTSRecord --;\r
+ CurrentBTSRecord--;\r
}\r
+\r
if (CurrentBTSRecord->LastBranchTo == DestinationIP) {\r
//\r
// Good! find 1st one, then find 2nd one.\r
return CurrentBTSRecord->LastBranchFrom;\r
}\r
}\r
+\r
CurrentBTSRecord--;\r
}\r
\r
VOID\r
EFIAPI\r
DebugExceptionHandler (\r
- IN EFI_EXCEPTION_TYPE InterruptType,\r
- IN EFI_SYSTEM_CONTEXT SystemContext\r
+ IN EFI_EXCEPTION_TYPE InterruptType,\r
+ IN EFI_SYSTEM_CONTEXT SystemContext\r
)\r
{\r
UINTN CpuIndex;\r
UINTN PFEntry;\r
\r
- if (!mSmmProfileStart) {\r
+ if (!mSmmProfileStart &&\r
+ !HEAP_GUARD_NONSTOP_MODE &&\r
+ !NULL_DETECTION_NONSTOP_MODE)\r
+ {\r
return;\r
}\r
+\r
CpuIndex = GetCpuIndex ();\r
\r
//\r
ClearTrapFlag (SystemContext);\r
}\r
\r
+/**\r
+ Check if the input address is in SMM ranges.\r
+\r
+ @param[in] Address The input address.\r
+\r
+ @retval TRUE The input address is in SMM.\r
+ @retval FALSE The input address is not in SMM.\r
+**/\r
+BOOLEAN\r
+IsInSmmRanges (\r
+ IN EFI_PHYSICAL_ADDRESS Address\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
+ return TRUE;\r
+ }\r
+\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if ((Address >= mSmmCpuSmramRanges[Index].CpuStart) &&\r
+ (Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize))\r
+ {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
/**\r
Check if the memory address will be mapped by 4KB-page.\r
\r
**/\r
BOOLEAN\r
IsAddressValid (\r
- IN EFI_PHYSICAL_ADDRESS Address,\r
- IN BOOLEAN *Nx\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN *Nx\r
)\r
{\r
UINTN Index;\r
\r
- *Nx = FALSE;\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
//\r
// Check configuration\r
return mProtectionMemRange[Index].Present;\r
}\r
}\r
+\r
*Nx = TRUE;\r
return FALSE;\r
-\r
} else {\r
- if ((Address < mCpuHotPlugData.SmrrBase) ||\r
- (Address >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
- *Nx = TRUE;\r
+ *Nx = TRUE;\r
+ if (IsInSmmRanges (Address)) {\r
+ *Nx = FALSE;\r
}\r
+\r
return TRUE;\r
}\r
}\r
**/\r
BOOLEAN\r
IsAddressSplit (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
+ IN EFI_PHYSICAL_ADDRESS Address\r
)\r
{\r
UINTN Index;\r
if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {\r
return TRUE;\r
}\r
- } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {\r
+ } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {\r
if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {\r
return TRUE;\r
}\r
}\r
}\r
+\r
//\r
// Return default\r
//\r
{\r
UINTN Index;\r
UINTN NumberOfDescriptors;\r
- UINTN NumberOfMmioDescriptors;\r
+ UINTN NumberOfAddedDescriptors;\r
UINTN NumberOfProtectRange;\r
UINTN NumberOfSpliteRange;\r
EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;\r
UINTN TotalSize;\r
- EFI_STATUS Status;\r
EFI_PHYSICAL_ADDRESS ProtectBaseAddress;\r
EFI_PHYSICAL_ADDRESS ProtectEndAddress;\r
EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress;\r
UINT64 Low4KBPageSize;\r
\r
NumberOfDescriptors = 0;\r
- NumberOfMmioDescriptors = 0;\r
+ NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;\r
NumberOfSpliteRange = 0;\r
MemorySpaceMap = NULL;\r
\r
//\r
// Get MMIO ranges from GCD and add them into protected memory ranges.\r
//\r
- Status = gDS->GetMemorySpaceMap (\r
- &NumberOfDescriptors,\r
- &MemorySpaceMap\r
- );\r
+ gDS->GetMemorySpaceMap (\r
+ &NumberOfDescriptors,\r
+ &MemorySpaceMap\r
+ );\r
for (Index = 0; Index < NumberOfDescriptors; Index++) {\r
if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {\r
- NumberOfMmioDescriptors++;\r
+ NumberOfAddedDescriptors++;\r
}\r
}\r
\r
- if (NumberOfMmioDescriptors != 0) {\r
- TotalSize = NumberOfMmioDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);\r
- mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);\r
+ if (NumberOfAddedDescriptors != 0) {\r
+ TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);\r
+ mProtectionMemRange = (MEMORY_PROTECTION_RANGE *)AllocateZeroPool (TotalSize);\r
ASSERT (mProtectionMemRange != NULL);\r
mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);\r
\r
//\r
// Create split ranges which come from protected ranges.\r
//\r
- TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);\r
- mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);\r
+ TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);\r
+ mSplitMemRange = (MEMORY_RANGE *)AllocateZeroPool (TotalSize);\r
ASSERT (mSplitMemRange != NULL);\r
\r
+ //\r
+ // Create SMM ranges which are set to present and execution-enable.\r
+ //\r
+ NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if ((mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base) &&\r
+ (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top))\r
+ {\r
+ //\r
+      // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize\r
+ //\r
+ break;\r
+ }\r
+\r
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;\r
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;\r
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;\r
+ mProtectionMemRange[NumberOfProtectRange].Nx = FALSE;\r
+ NumberOfProtectRange++;\r
+ }\r
+\r
//\r
// Create MMIO ranges which are set to present and execution-disable.\r
//\r
- NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);\r
for (Index = 0; Index < NumberOfDescriptors; Index++) {\r
if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {\r
continue;\r
}\r
+\r
mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;\r
mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;\r
mProtectionMemRange[NumberOfProtectRange].Present = TRUE;\r
mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;\r
NumberOfProtectRange++;\r
}\r
+\r
+ //\r
+ // Check and updated actual protected memory ranges count\r
+ //\r
+ ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);\r
+ mProtectionMemRangeCount = NumberOfProtectRange;\r
}\r
\r
//\r
Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);\r
Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);\r
if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&\r
- ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {\r
+ ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB))\r
+ {\r
//\r
// There is an range which could be mapped by 2MB-page.\r
//\r
mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);\r
NumberOfSpliteRange++;\r
}\r
+\r
if (Low4KBPageSize != 0) {\r
//\r
// Add not 2MB-aligned range to be mapped by 4KB-page.\r
\r
mSplitMemRangeCount = NumberOfSpliteRange;\r
\r
- DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));\r
+ DEBUG ((DEBUG_INFO, "SMM Profile Memory Ranges:\n"));\r
for (Index = 0; Index < mProtectionMemRangeCount; Index++) {\r
- DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));\r
- DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));\r
+ DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));\r
+ DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));\r
}\r
+\r
for (Index = 0; Index < mSplitMemRangeCount; Index++) {\r
- DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));\r
- DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));\r
+ DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));\r
+ DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));\r
}\r
}\r
\r
VOID\r
)\r
{\r
- UINT64 *Pml4;\r
- UINT64 *Pde;\r
- UINT64 *Pte;\r
- UINT64 *Pt;\r
- UINTN Address;\r
- UINTN Level1;\r
- UINTN Level2;\r
- UINTN Level3;\r
- UINTN Level4;\r
- UINTN NumberOfPdpEntries;\r
- UINTN NumberOfPml4Entries;\r
- UINTN SizeOfMemorySpace;\r
- BOOLEAN Nx;\r
+ UINT64 Pml5Entry;\r
+ UINT64 Pml4Entry;\r
+ UINT64 *Pml5;\r
+ UINT64 *Pml4;\r
+ UINT64 *Pdpt;\r
+ UINT64 *Pd;\r
+ UINT64 *Pt;\r
+ UINTN Address;\r
+ UINTN Pml5Index;\r
+ UINTN Pml4Index;\r
+ UINTN PdptIndex;\r
+ UINTN PdIndex;\r
+ UINTN PtIndex;\r
+ UINTN NumberOfPdptEntries;\r
+ UINTN NumberOfPml4Entries;\r
+ UINTN NumberOfPml5Entries;\r
+ UINTN SizeOfMemorySpace;\r
+ BOOLEAN Nx;\r
+ IA32_CR4 Cr4;\r
+ BOOLEAN Enable5LevelPaging;\r
+\r
+ Cr4.UintN = AsmReadCr4 ();\r
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);\r
\r
if (sizeof (UINTN) == sizeof (UINT64)) {\r
- Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ if (!Enable5LevelPaging) {\r
+ Pml5Entry = (UINTN)mSmmProfileCr3 | IA32_PG_P;\r
+ Pml5 = &Pml5Entry;\r
+ } else {\r
+ Pml5 = (UINT64 *)(UINTN)mSmmProfileCr3;\r
+ }\r
+\r
SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;\r
//\r
// Calculate the table entries of PML4E and PDPTE.\r
//\r
- if (SizeOfMemorySpace <= 39 ) {\r
- NumberOfPml4Entries = 1;\r
- NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));\r
- } else {\r
- NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));\r
- NumberOfPdpEntries = 512;\r
+ NumberOfPml5Entries = 1;\r
+ if (SizeOfMemorySpace > 48) {\r
+ NumberOfPml5Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 48);\r
+ SizeOfMemorySpace = 48;\r
}\r
+\r
+ NumberOfPml4Entries = 1;\r
+ if (SizeOfMemorySpace > 39) {\r
+ NumberOfPml4Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 39);\r
+ SizeOfMemorySpace = 39;\r
+ }\r
+\r
+ NumberOfPdptEntries = 1;\r
+ ASSERT (SizeOfMemorySpace > 30);\r
+ NumberOfPdptEntries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 30);\r
} else {\r
+ Pml4Entry = (UINTN)mSmmProfileCr3 | IA32_PG_P;\r
+ Pml4 = &Pml4Entry;\r
+ Pml5Entry = (UINTN)Pml4 | IA32_PG_P;\r
+ Pml5 = &Pml5Entry;\r
+ NumberOfPml5Entries = 1;\r
NumberOfPml4Entries = 1;\r
- NumberOfPdpEntries = 4;\r
+ NumberOfPdptEntries = 4;\r
}\r
\r
//\r
// Go through page table and change 2MB-page into 4KB-page.\r
//\r
- for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- if ((Pml4[Level1] & IA32_PG_P) == 0) {\r
- //\r
- // If Pml4 entry does not exist, skip it\r
- //\r
- continue;\r
- }\r
- Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
- } else {\r
- Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {\r
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {\r
+ //\r
+ // If PML5 entry does not exist, skip it\r
+ //\r
+ continue;\r
}\r
- for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {\r
- if ((*Pde & IA32_PG_P) == 0) {\r
+\r
+ Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);\r
+ for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {\r
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {\r
//\r
- // If PDE entry does not exist, skip it\r
+ // If PML4 entry does not exist, skip it\r
//\r
continue;\r
}\r
- Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
- if (Pte == 0) {\r
- continue;\r
- }\r
- for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {\r
- if ((*Pte & IA32_PG_P) == 0) {\r
+\r
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
+ for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {\r
+ if ((*Pdpt & IA32_PG_P) == 0) {\r
//\r
- // If PTE entry does not exist, skip it\r
+ // If PDPT entry does not exist, skip it\r
//\r
continue;\r
}\r
- Address = (((Level2 << 9) + Level3) << 21);\r
\r
- //\r
- // If it is 2M page, check IsAddressSplit()\r
- //\r
- if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {\r
+ if ((*Pdpt & IA32_PG_PS) != 0) {\r
//\r
- // Based on current page table, create 4KB page table for split area.\r
+ // This is 1G entry, skip it\r
//\r
- ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));\r
-\r
- Pt = AllocatePages (1);\r
- ASSERT (Pt != NULL);\r
-\r
- // Split it\r
- for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {\r
- Pt[Level4] = Address + ((Level4 << 12) | IA32_PG_RW | IA32_PG_P);\r
- } // end for PT\r
- *Pte = (UINTN)Pt | IA32_PG_RW | IA32_PG_P;\r
- } // end if IsAddressSplit\r
- } // end for PTE\r
- } // end for PDE\r
- }\r
+ continue;\r
+ }\r
+\r
+ Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
+ if (Pd == 0) {\r
+ continue;\r
+ }\r
+\r
+ for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {\r
+ if ((*Pd & IA32_PG_P) == 0) {\r
+ //\r
+ // If PD entry does not exist, skip it\r
+ //\r
+ continue;\r
+ }\r
+\r
+ Address = (UINTN)LShiftU64 (\r
+ LShiftU64 (\r
+ LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,\r
+ 9\r
+ ) + PdIndex,\r
+ 21\r
+ );\r
+\r
+ //\r
+ // If it is 2M page, check IsAddressSplit()\r
+ //\r
+ if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {\r
+ //\r
+ // Based on current page table, create 4KB page table for split area.\r
+ //\r
+ ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));\r
+\r
+ Pt = AllocatePageTableMemory (1);\r
+ ASSERT (Pt != NULL);\r
+\r
+ // Split it\r
+ for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++) {\r
+ Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
+ } // end for PT\r
+\r
+ *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+ } // end if IsAddressSplit\r
+ } // end for PD\r
+ } // end for PDPT\r
+ } // end for PML4\r
+ } // end for PML5\r
\r
//\r
// Go through page table and set several page table entries to absent or execute-disable.\r
//\r
- DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));\r
- for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- if ((Pml4[Level1] & IA32_PG_P) == 0) {\r
- //\r
- // If Pml4 entry does not exist, skip it\r
- //\r
- continue;\r
- }\r
- Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);\r
- } else {\r
- Pde = (UINT64*)(UINTN)mSmmProfileCr3;\r
+ DEBUG ((DEBUG_INFO, "Patch page table start ...\n"));\r
+ for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {\r
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {\r
+ //\r
+ // If PML5 entry does not exist, skip it\r
+ //\r
+ continue;\r
}\r
- for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {\r
- if ((*Pde & IA32_PG_P) == 0) {\r
+\r
+ Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);\r
+ for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {\r
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {\r
//\r
- // If PDE entry does not exist, skip it\r
+ // If PML4 entry does not exist, skip it\r
//\r
continue;\r
}\r
- Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);\r
- if (Pte == 0) {\r
- continue;\r
- }\r
- for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {\r
- if ((*Pte & IA32_PG_P) == 0) {\r
+\r
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
+ for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {\r
+ if ((*Pdpt & IA32_PG_P) == 0) {\r
//\r
- // If PTE entry does not exist, skip it\r
+ // If PDPT entry does not exist, skip it\r
//\r
continue;\r
}\r
- Address = (((Level2 << 9) + Level3) << 21);\r
\r
- if ((*Pte & IA32_PG_PS) != 0) {\r
- // 2MB page\r
+ if ((*Pdpt & IA32_PG_PS) != 0) {\r
+ //\r
+ // This is 1G entry, set NX bit and skip it\r
+ //\r
+ if (mXdSupported) {\r
+ *Pdpt = *Pdpt | IA32_PG_NX;\r
+ }\r
+\r
+ continue;\r
+ }\r
\r
- if (!IsAddressValid (Address, &Nx)) {\r
+ Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
+ if (Pd == 0) {\r
+ continue;\r
+ }\r
+\r
+ for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {\r
+ if ((*Pd & IA32_PG_P) == 0) {\r
//\r
- // Patch to remove Present flag and RW flag\r
+ // If PD entry does not exist, skip it\r
//\r
- *Pte = *Pte & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));\r
- }\r
- if (Nx && mXdSupported) {\r
- *Pte = *Pte | IA32_PG_NX;\r
- }\r
- } else {\r
- // 4KB page\r
- Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK);\r
- if (Pt == 0) {\r
continue;\r
}\r
- for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {\r
+\r
+ Address = (UINTN)LShiftU64 (\r
+ LShiftU64 (\r
+ LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,\r
+ 9\r
+ ) + PdIndex,\r
+ 21\r
+ );\r
+\r
+ if ((*Pd & IA32_PG_PS) != 0) {\r
+ // 2MB page\r
+\r
if (!IsAddressValid (Address, &Nx)) {\r
- *Pt = *Pt & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));\r
+ //\r
+ // Patch to remove Present flag and RW flag\r
+ //\r
+ *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);\r
}\r
+\r
if (Nx && mXdSupported) {\r
- *Pt = *Pt | IA32_PG_NX;\r
+ *Pd = *Pd | IA32_PG_NX;\r
}\r
- Address += SIZE_4KB;\r
- } // end for PT\r
- } // end if PS\r
- } // end for PTE\r
- } // end for PDE\r
- }\r
+ } else {\r
+ // 4KB page\r
+ Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);\r
+ if (Pt == 0) {\r
+ continue;\r
+ }\r
+\r
+ for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++, Pt++) {\r
+ if (!IsAddressValid (Address, &Nx)) {\r
+ *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);\r
+ }\r
+\r
+ if (Nx && mXdSupported) {\r
+ *Pt = *Pt | IA32_PG_NX;\r
+ }\r
+\r
+ Address += SIZE_4KB;\r
+ } // end for PT\r
+ } // end if PS\r
+ } // end for PD\r
+ } // end for PDPT\r
+ } // end for PML4\r
+ } // end for PML5\r
\r
//\r
// Flush TLB\r
//\r
CpuFlushTlb ();\r
- DEBUG ((EFI_D_INFO, "Patch page table done!\n"));\r
+ DEBUG ((DEBUG_INFO, "Patch page table done!\n"));\r
//\r
// Set execute-disable flag\r
//\r
mXdEnabled = TRUE;\r
\r
- return ;\r
-}\r
-\r
-/**\r
- To find FADT in ACPI tables.\r
-\r
- @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable.\r
-\r
- @return FADT table pointer.\r
-**/\r
-EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *\r
-FindAcpiFadtTableByAcpiGuid (\r
- IN EFI_GUID *AcpiTableGuid\r
- )\r
-{\r
- EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER *Rsdp;\r
- EFI_ACPI_DESCRIPTION_HEADER *Rsdt;\r
- EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
- UINTN Index;\r
- UINT32 Data32;\r
- Rsdp = NULL;\r
- Rsdt = NULL;\r
- Fadt = NULL;\r
- //\r
- // found ACPI table RSD_PTR from system table\r
- //\r
- for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {\r
- if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), AcpiTableGuid)) {\r
- //\r
- // A match was found.\r
- //\r
- Rsdp = gST->ConfigurationTable[Index].VendorTable;\r
- break;\r
- }\r
- }\r
-\r
- if (Rsdp == NULL) {\r
- return NULL;\r
- }\r
-\r
- Rsdt = (EFI_ACPI_DESCRIPTION_HEADER *)(UINTN) Rsdp->RsdtAddress;\r
- if (Rsdt == NULL || Rsdt->Signature != EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE) {\r
- return NULL;\r
- }\r
-\r
- for (Index = sizeof (EFI_ACPI_DESCRIPTION_HEADER); Index < Rsdt->Length; Index = Index + sizeof (UINT32)) {\r
-\r
- Data32 = *(UINT32 *) ((UINT8 *) Rsdt + Index);\r
- Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) (UINT32 *) (UINTN) Data32;\r
- if (Fadt->Header.Signature == EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {\r
- break;\r
- }\r
- }\r
-\r
- if (Fadt == NULL || Fadt->Header.Signature != EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {\r
- return NULL;\r
- }\r
-\r
- return Fadt;\r
-}\r
-\r
-/**\r
- To find FADT in ACPI tables.\r
-\r
- @return FADT table pointer.\r
-**/\r
-EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *\r
-FindAcpiFadtTable (\r
- VOID\r
- )\r
-{\r
- EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
-\r
- Fadt = FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid);\r
- if (Fadt != NULL) {\r
- return Fadt;\r
- }\r
-\r
- return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid);\r
+ return;\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;\r
\r
- Fadt = FindAcpiFadtTable ();\r
+ Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *)EfiLocateFirstAcpiTable (\r
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE\r
+ );\r
ASSERT (Fadt != NULL);\r
\r
mSmiCommandPort = Fadt->SmiCmd;\r
- DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));\r
+ DEBUG ((DEBUG_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));\r
}\r
\r
/**\r
IN EFI_HANDLE Handle\r
)\r
{\r
- EFI_STATUS Status;\r
-\r
//\r
// Save to variable so that SMM profile data can be found.\r
//\r
- Status = gRT->SetVariable (\r
- SMM_PROFILE_NAME,\r
- &gEfiCallerIdGuid,\r
- EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,\r
- sizeof(mSmmProfileBase),\r
- &mSmmProfileBase\r
- );\r
+ gRT->SetVariable (\r
+ SMM_PROFILE_NAME,\r
+ &gEfiCallerIdGuid,\r
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,\r
+ sizeof (mSmmProfileBase),\r
+ &mSmmProfileBase\r
+ );\r
\r
//\r
// Get Software SMI from FADT\r
VOID\r
)\r
{\r
- EFI_STATUS Status;\r
- EFI_PHYSICAL_ADDRESS Base;\r
- VOID *Registration;\r
- UINTN Index;\r
- UINTN MsrDsAreaSizePerCpu;\r
- UINTN TotalSize;\r
-\r
- mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ EFI_STATUS Status;\r
+ EFI_PHYSICAL_ADDRESS Base;\r
+ VOID *Registration;\r
+ UINTN Index;\r
+ UINTN MsrDsAreaSizePerCpu;\r
+ UINTN TotalSize;\r
+\r
+ mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);\r
ASSERT (mPFEntryCount != NULL);\r
- mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (\r
- sizeof (mLastPFEntryValue[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (\r
+ sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus\r
+ );\r
ASSERT (mLastPFEntryValue != NULL);\r
mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (\r
- sizeof (mLastPFEntryPointer[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus\r
+ );\r
ASSERT (mLastPFEntryPointer != NULL);\r
\r
//\r
TotalSize = mSmmProfileSize;\r
}\r
\r
- Base = 0xFFFFFFFF;\r
+ Base = 0xFFFFFFFF;\r
Status = gBS->AllocatePages (\r
AllocateMaxAddress,\r
EfiReservedMemoryType,\r
// Initialize SMM profile data header.\r
//\r
mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);\r
- mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));\r
- mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));\r
+ mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof (SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));\r
+ mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof (SMM_PROFILE_ENTRY));\r
mSmmProfileBase->CurDataEntries = 0;\r
mSmmProfileBase->CurDataSize = 0;\r
mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;\r
mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
\r
if (mBtsSupported) {\r
- mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);\r
ASSERT (mMsrDsArea != NULL);\r
- mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);\r
ASSERT (mMsrBTSRecord != NULL);\r
- mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);\r
ASSERT (mMsrPEBSRecord != NULL);\r
\r
- mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);\r
- MsrDsAreaSizePerCpu = mMsrDsAreaSize / PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
- mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);\r
- for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {\r
+ mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);\r
+ MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;\r
+ mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof (MSR_DS_AREA_STRUCT)) / sizeof (BRANCH_TRACE_RECORD);\r
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);\r
- mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));\r
- mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);\r
+ mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof (MSR_DS_AREA_STRUCT));\r
+ mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER);\r
\r
- mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];\r
- mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;\r
- mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;\r
- mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;\r
+ mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];\r
+ mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;\r
+ mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof (BRANCH_TRACE_RECORD) + 1;\r
+ mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;\r
\r
mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];\r
mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;\r
- mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;\r
+ mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof (PEBS_RECORD) + 1;\r
mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;\r
}\r
}\r
);\r
ASSERT_EFI_ERROR (Status);\r
\r
- return ;\r
+ return;\r
}\r
\r
/**\r
- Check if XD feature is supported by a processor.\r
+ Check if feature is supported by a processor.\r
\r
**/\r
VOID\r
VOID\r
)\r
{\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
+ UINT32 RegEax;\r
+ UINT32 RegEcx;\r
+ UINT32 RegEdx;\r
+ MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;\r
+\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);\r
+ if ((RegEcx & CPUID_CET_SS) == 0) {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+ }\r
\r
if (mXdSupported) {\r
AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
// Extended CPUID functions are not supported on this processor.\r
//\r
mXdSupported = FALSE;\r
+ PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);\r
}\r
\r
AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
// Execute Disable Bit feature is not supported on this processor.\r
//\r
mXdSupported = FALSE;\r
+ PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);\r
+ }\r
+\r
+ if (StandardSignatureIsAuthenticAMD ()) {\r
+ //\r
+ // AMD processors do not support MSR_IA32_MISC_ENABLE\r
+ //\r
+ PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);\r
}\r
}\r
\r
// BTINT bits in the MSR_DEBUGCTLA MSR.\r
// 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.\r
//\r
- if ((AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 11, 11) == 0) &&\r
- (AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 12, 12) == 0)) {\r
+ MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
+ if (MiscEnableMsr.Bits.BTS == 1) {\r
//\r
- // BTS facilities is supported.\r
+        // The BTS facility is not supported if the MSR_IA32_MISC_ENABLE.BTS bit is set.\r
//\r
mBtsSupported = FALSE;\r
}\r
}\r
}\r
\r
-/**\r
- Check if XD and BTS features are supported by all processors.\r
-\r
-**/\r
-VOID\r
-CheckProcessorFeature (\r
- VOID\r
- )\r
-{\r
- EFI_STATUS Status;\r
- EFI_MP_SERVICES_PROTOCOL *MpServices;\r
-\r
- Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
- ASSERT_EFI_ERROR (Status);\r
-\r
- //\r
- // First detect if XD and BTS are supported\r
- //\r
- mXdSupported = TRUE;\r
- mBtsSupported = TRUE;\r
-\r
- //\r
- // Check if XD and BTS are supported on all processors.\r
- //\r
- CheckFeatureSupported ();\r
-\r
- //\r
- //Check on other processors if BSP supports this\r
- //\r
- if (mXdSupported || mBtsSupported) {\r
- MpServices->StartupAllAPs (\r
- MpServices,\r
- (EFI_AP_PROCEDURE) CheckFeatureSupported,\r
- TRUE,\r
- NULL,\r
- 0,\r
- NULL,\r
- NULL\r
- );\r
- }\r
-}\r
-\r
-/**\r
- Enable XD feature.\r
-\r
-**/\r
-VOID\r
-ActivateXd (\r
- VOID\r
- )\r
-{\r
- UINT64 MsrRegisters;\r
-\r
- MsrRegisters = AsmReadMsr64 (MSR_EFER);\r
- if ((MsrRegisters & MSR_EFER_XD) != 0) {\r
- return ;\r
- }\r
- MsrRegisters |= MSR_EFER_XD;\r
- AsmWriteMsr64 (MSR_EFER, MsrRegisters);\r
-}\r
-\r
/**\r
Enable single step.\r
\r
VOID\r
)\r
{\r
- UINTN Dr6;\r
+ UINTN Dr6;\r
\r
Dr6 = AsmReadDr6 ();\r
if ((Dr6 & DR6_SINGLE_STEP) != 0) {\r
return;\r
}\r
+\r
Dr6 |= DR6_SINGLE_STEP;\r
AsmWriteDr6 (Dr6);\r
}\r
\r
DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);\r
if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {\r
- return ;\r
+ return;\r
}\r
- AsmWriteMsr64 (MSR_LER_FROM_LIP, 0);\r
- AsmWriteMsr64 (MSR_LER_TO_LIP, 0);\r
+\r
DebugCtl |= MSR_DEBUG_CTL_LBR;\r
AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);\r
}\r
**/\r
VOID\r
ActivateBTS (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
UINT64 DebugCtl;\r
\r
DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);\r
if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {\r
- return ;\r
+ return;\r
}\r
\r
AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);\r
**/\r
VOID\r
ActivateSmmProfile (\r
- IN UINTN CpuIndex\r
+ IN UINTN CpuIndex\r
)\r
{\r
//\r
//\r
// Skip SMM profile initialization if feature is disabled\r
//\r
- if (!FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&\r
+ !HEAP_GUARD_NONSTOP_MODE &&\r
+ !NULL_DETECTION_NONSTOP_MODE)\r
+ {\r
return;\r
}\r
\r
// Initialize profile IDT.\r
//\r
InitIdtr ();\r
+\r
+ //\r
+ // Tell #PF handler to prepare a #DB subsequently.\r
+ //\r
+ mSetupDebugTrap = TRUE;\r
}\r
\r
/**\r
**/\r
VOID\r
RestorePageTableBelow4G (\r
- UINT64 *PageTable,\r
- UINT64 PFAddress,\r
- UINTN CpuIndex,\r
- UINTN ErrorCode\r
+ UINT64 *PageTable,\r
+ UINT64 PFAddress,\r
+ UINTN CpuIndex,\r
+ UINTN ErrorCode\r
)\r
{\r
- UINTN PTIndex;\r
- UINTN PFIndex;\r
+ UINTN PTIndex;\r
+ UINTN PFIndex;\r
+ IA32_CR4 Cr4;\r
+ BOOLEAN Enable5LevelPaging;\r
+\r
+ Cr4.UintN = AsmReadCr4 ();\r
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);\r
+\r
+ //\r
+ // PML5\r
+ //\r
+ if (Enable5LevelPaging) {\r
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);\r
+ ASSERT (PageTable[PTIndex] != 0);\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
+ }\r
\r
//\r
// PML4\r
//\r
- if (sizeof(UINT64) == sizeof(UINTN)) {\r
+ if (sizeof (UINT64) == sizeof (UINTN)) {\r
PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);\r
ASSERT (PageTable[PTIndex] != 0);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
}\r
\r
//\r
//\r
PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);\r
ASSERT (PageTable[PTIndex] != 0);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
\r
//\r
// PD\r
//\r
ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);\r
if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {\r
- PFIndex = mPFEntryCount[CpuIndex];\r
+ PFIndex = mPFEntryCount[CpuIndex];\r
mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];\r
mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];\r
mPFEntryCount[CpuIndex]++;\r
//\r
// Set new entry\r
//\r
- PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));\r
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));\r
PageTable[PTIndex] |= (UINT64)IA32_PG_PS;\r
- PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);\r
+ PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;\r
if ((ErrorCode & IA32_PF_EC_ID) != 0) {\r
PageTable[PTIndex] &= ~IA32_PG_NX;\r
}\r
// Small page\r
//\r
ASSERT (PageTable[PTIndex] != 0);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);\r
\r
//\r
// 4K PTE\r
//\r
ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);\r
if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {\r
- PFIndex = mPFEntryCount[CpuIndex];\r
+ PFIndex = mPFEntryCount[CpuIndex];\r
mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];\r
mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];\r
mPFEntryCount[CpuIndex]++;\r
//\r
// Set new entry\r
//\r
- PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));\r
- PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);\r
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));\r
+ PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;\r
if ((ErrorCode & IA32_PF_EC_ID) != 0) {\r
PageTable[PTIndex] &= ~IA32_PG_NX;\r
}\r
}\r
}\r
\r
+/**\r
+  Handler for a Page Fault triggered by a Guard page.\r
+\r
+  Restores the faulting page (and the page that follows it) to the PRESENT\r
+  state in the active page table so the faulting access can be restarted.\r
+\r
+  @param ErrorCode  The Error code of exception.\r
+\r
+**/\r
+VOID\r
+GuardPagePFHandler (\r
+  UINTN  ErrorCode\r
+  )\r
+{\r
+  UINT64  *PageTable;\r
+  UINT64  PFAddress;\r
+  UINT64  RestoreAddress;\r
+  UINTN   RestorePageNumber;\r
+  UINTN   CpuIndex;\r
+\r
+  //\r
+  // NOTE(review): the raw CR3 value is used directly as the top-level page\r
+  // table pointer; this assumes the CR3 flag bits (PWT/PCD) are clear in\r
+  // SMM -- confirm.\r
+  //\r
+  PageTable = (UINT64 *)AsmReadCr3 ();\r
+  PFAddress = AsmReadCr2 ();\r
+  CpuIndex  = GetCpuIndex ();\r
+\r
+  //\r
+  // A memory operation that crosses pages, like the "rep mov" instruction,\r
+  // could cause an infinite loop between this and the Debug Trap handler; make\r
+  // sure that the current page and the page that follows are both PRESENT.\r
+  //\r
+  // NOTE(review): unlike SmmProfilePFHandler, this path always uses\r
+  // RestorePageTableBelow4G regardless of the fault address -- presumably\r
+  // guard pages are expected below 4GB; verify against callers.\r
+  //\r
+  RestorePageNumber = 2;\r
+  RestoreAddress    = PFAddress;\r
+  while (RestorePageNumber > 0) {\r
+    RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);\r
+    RestoreAddress += EFI_PAGE_SIZE;\r
+    RestorePageNumber--;\r
+  }\r
+\r
+  //\r
+  // Flush TLB so the updated page-table entries take effect.\r
+  //\r
+  CpuFlushTlb ();\r
+}\r
+\r
/**\r
The Page fault handler to save SMM profile data.\r
\r
**/\r
VOID\r
SmmProfilePFHandler (\r
- UINTN Rip,\r
- UINTN ErrorCode\r
+ UINTN Rip,\r
+ UINTN ErrorCode\r
)\r
{\r
- UINT64 *PageTable;\r
- UINT64 PFAddress;\r
- UINTN CpuIndex;\r
- UINTN Index;\r
- UINT64 InstructionAddress;\r
- UINTN MaxEntryNumber;\r
- UINTN CurrentEntryNumber;\r
- BOOLEAN IsValidPFAddress;\r
- SMM_PROFILE_ENTRY *SmmProfileEntry;\r
- UINT64 SmiCommand;\r
- EFI_STATUS Status;\r
- UINTN SwSmiCpuIndex;\r
- UINT8 SoftSmiValue;\r
- EFI_SMM_SAVE_STATE_IO_INFO IoInfo;\r
+ UINT64 *PageTable;\r
+ UINT64 PFAddress;\r
+ UINT64 RestoreAddress;\r
+ UINTN RestorePageNumber;\r
+ UINTN CpuIndex;\r
+ UINTN Index;\r
+ UINT64 InstructionAddress;\r
+ UINTN MaxEntryNumber;\r
+ UINTN CurrentEntryNumber;\r
+ BOOLEAN IsValidPFAddress;\r
+ SMM_PROFILE_ENTRY *SmmProfileEntry;\r
+ UINT64 SmiCommand;\r
+ EFI_STATUS Status;\r
+ UINT8 SoftSmiValue;\r
+ EFI_SMM_SAVE_STATE_IO_INFO IoInfo;\r
\r
if (!mSmmProfileStart) {\r
//\r
DisableBTS ();\r
}\r
\r
- IsValidPFAddress = FALSE;\r
- PageTable = (UINT64 *)AsmReadCr3 ();\r
- PFAddress = AsmReadCr2 ();\r
- CpuIndex = GetCpuIndex ();\r
+ IsValidPFAddress = FALSE;\r
+ PageTable = (UINT64 *)AsmReadCr3 ();\r
+ PFAddress = AsmReadCr2 ();\r
+ CpuIndex = GetCpuIndex ();\r
\r
- if (PFAddress <= 0xFFFFFFFF) {\r
- RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode);\r
- } else {\r
- RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress);\r
+ //\r
+  // A memory operation that crosses pages, like the "rep mov" instruction,\r
+  // could cause an infinite loop between this and the Debug Trap handler; make\r
+  // sure that the current page and the page that follows are both PRESENT.\r
+ //\r
+ RestorePageNumber = 2;\r
+ RestoreAddress = PFAddress;\r
+ while (RestorePageNumber > 0) {\r
+ if (RestoreAddress <= 0xFFFFFFFF) {\r
+ RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);\r
+ } else {\r
+ RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);\r
+ }\r
+\r
+ RestoreAddress += EFI_PAGE_SIZE;\r
+ RestorePageNumber--;\r
}\r
\r
if (!IsValidPFAddress) {\r
InstructionAddress = Rip;\r
- if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {\r
+ if (((ErrorCode & IA32_PF_EC_ID) != 0) && (mBtsSupported)) {\r
//\r
// If it is instruction fetch failure, get the correct IP from BTS.\r
//\r
}\r
}\r
\r
- //\r
- // Try to find which CPU trigger SWSMI\r
- //\r
- SwSmiCpuIndex = 0;\r
//\r
// Indicate it is not software SMI\r
//\r
- SmiCommand = 0xFFFFFFFFFFFFFFFFULL;\r
+ SmiCommand = 0xFFFFFFFFFFFFFFFFULL;\r
for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
- Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);\r
+ Status = SmmReadSaveState (&mSmmCpu, sizeof (IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);\r
if (EFI_ERROR (Status)) {\r
continue;\r
}\r
+\r
if (IoInfo.IoPort == mSmiCommandPort) {\r
- //\r
- // Great! Find it.\r
- //\r
- SwSmiCpuIndex = Index;\r
//\r
// A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.\r
//\r
SoftSmiValue = IoRead8 (mSmiCommandPort);\r
- SmiCommand = (UINT64)SoftSmiValue;\r
+ SmiCommand = (UINT64)SoftSmiValue;\r
break;\r
}\r
}\r
//\r
// Check if there is already a same entry in profile data.\r
//\r
- for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {\r
+ for (Index = 0; Index < (UINTN)mSmmProfileBase->CurDataEntries; Index++) {\r
if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&\r
(SmmProfileEntry[Index].Address == PFAddress) &&\r
(SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&\r
(SmmProfileEntry[Index].Instruction == InstructionAddress) &&\r
- (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {\r
+ (SmmProfileEntry[Index].SmiCmd == SmiCommand))\r
+ {\r
//\r
// Same record exist, need not save again.\r
//\r
break;\r
}\r
}\r
+\r
if (Index == mSmmProfileBase->CurDataEntries) {\r
- CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;\r
- MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;\r
+ CurrentEntryNumber = (UINTN)mSmmProfileBase->CurDataEntries;\r
+ MaxEntryNumber = (UINTN)mSmmProfileBase->MaxDataEntries;\r
if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {\r
CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;\r
}\r
+\r
if (CurrentEntryNumber < MaxEntryNumber) {\r
//\r
// Log the new entry\r
}\r
}\r
}\r
+\r
//\r
// Flush TLB\r
//\r
VOID\r
)\r
{\r
- SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);\r
+ EFI_STATUS Status;\r
+\r
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);\r
+ ASSERT_EFI_ERROR (Status);\r
}\r