/** @file\r
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
\r
-Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
-\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
},\r
+ NULL, // pointer to Ap Wrapper Func array\r
+ {NULL, NULL}, // List_Entry for Tokens.\r
};\r
\r
CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
SmmWriteSaveState\r
};\r
\r
+///
+/// SMM Memory Attribute Protocol instance
+///
+/// Published into the SMM protocol database (see the
+/// SmmInstallProtocolInterface call with gEdkiiSmmMemoryAttributeProtocolGuid
+/// later in this file) so other SMM drivers can query/set/clear page
+/// attributes inside SMRAM.
+///
+EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
+  EdkiiSmmGetMemoryAttributes,
+  EdkiiSmmSetMemoryAttributes,
+  EdkiiSmmClearMemoryAttributes
+};
+
//
// Table of interrupt/exception handlers registered for SMM, one slot per
// architectural exception vector.
//
EFI_CPU_INTERRUPT_HANDLER   mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
\r
//
// Per-CPU SMI stack bookkeeping: mSmmStackArrayEnd is the last (inclusive)
// address of the allocated stack array, and mSmmStackSize is one CPU's
// stack region size (page-rounded PcdCpuSmmStackSize, plus 2 extra pages
// when PcdCpuSmmStackGuard is enabled — see the allocation code below).
//
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

+// Size of one CPU's shadow-stack region; stays 0 unless CET shadow stacks
+// are enabled (PcdControlFlowEnforcementPropertyMask != 0 and CPU supports
+// CET_SS).
+UINTN mSmmShadowStackSize;
+// Assume CET is available until CPUID (leaf 7, ECX bit CET_SS) or the PCD
+// mask proves otherwise; cleared — and patched into the SMI entry code via
+// mPatchCetSupported — during PiCpuSmmEntry.
+BOOLEAN mCetSupported = TRUE;
+
UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// Serializes ConfigSmmCodeAccessCheckOnCurrentProcessor() between the BSP
// and each AP (acquired before dispatching, released by the AP when done).
//
SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;

+//
+// Saved SMM ranges information, captured once from
+// EFI_SMM_ACCESS2_PROTOCOL.GetCapabilities() in FindSmramInfo() and kept
+// for the life of the driver (no longer freed after SMRR computation).
+//
+EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
+UINTN mSmmCpuSmramRangeCount;
+
+// Width of CPU physical addresses in bits — presumably captured from
+// CPUID; its initializer is not visible in this chunk, confirm in the
+// full file. NOTE(review): verify before relying on it.
+UINT8 mPhysicalAddressBits;
+
+//
+// Control register contents saved for SMM S3 resume state initialization.
+// Captured from AsmReadCr0()/AsmReadCr4() when the SMI entry template is
+// patched.
+//
+UINT32 mSmmCr0;
+UINT32 mSmmCr4;
+
+\r
/**\r
Initialize IDT to setup exception handlers for SMM.\r
\r
)\r
{\r
UINTN Pe32Data;\r
- EFI_IMAGE_DOS_HEADER *DosHdr;\r
- EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
VOID *PdbPointer;\r
- UINT64 DumpIpAddress;\r
\r
//\r
// Find Image Base\r
//\r
- Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
- while (Pe32Data != 0) {\r
- DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
- if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
- //\r
- // DOS image header is present, so read the PE header after the DOS image header.\r
- //\r
- Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
- //\r
- // Make sure PE header address does not overflow and is less than the initial address.\r
- //\r
- if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
- if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
- //\r
- // It's PE image.\r
- //\r
- break;\r
- }\r
- }\r
- }\r
-\r
- //\r
- // Not found the image base, check the previous aligned address\r
- //\r
- Pe32Data -= SIZE_4KB;\r
- }\r
-\r
- DumpIpAddress = CallerIpAddress;\r
- DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
-\r
+ Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
if (Pe32Data != 0) {\r
+ DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
if (PdbPointer != NULL) {\r
- DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
+ DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
}\r
}\r
}\r
if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+ //\r
+ // The SpeculationBarrier() call here is to ensure the above check for the\r
+ // CpuIndex has been completed before the execution of subsequent codes.\r
+ //\r
+ SpeculationBarrier ();\r
\r
//\r
// Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
//\r
// Patch ASM code template with current CR0, CR3, and CR4 values\r
//\r
- gSmmCr0 = (UINT32)AsmReadCr0 ();\r
- gSmmCr3 = (UINT32)AsmReadCr3 ();\r
- gSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ mSmmCr0 = (UINT32)AsmReadCr0 ();\r
+ PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
+ PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
+ mSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);\r
\r
//\r
// Patch GDTR for SMM base relocation\r
UINT8 *Stacks;\r
VOID *Registration;\r
UINT32 RegEax;\r
+ UINT32 RegEbx;\r
+ UINT32 RegEcx;\r
UINT32 RegEdx;\r
UINTN FamilyId;\r
UINTN ModelId;\r
UINT32 Cr3;\r
\r
+ //\r
+ // Initialize address fixup\r
+ //\r
+ PiSmmCpuSmmInitFixupAddress ();\r
+ PiSmmCpuSmiEntryFixupAddress ();\r
+\r
//\r
// Initialize Debug Agent to support source level debug in SMM code\r
//\r
EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
);\r
\r
- //\r
- // Fix segment address of the long-mode-switch jump\r
- //\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
- }\r
-\r
//\r
// Find out SMRR Base and SMRR Size\r
//\r
}\r
}\r
\r
+ DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
+ if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax > CPUID_EXTENDED_FUNCTION) {\r
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
+ DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
+ DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
+ DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
+ if ((RegEcx & CPUID_CET_SS) == 0) {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+ if (mCetSupported) {\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+ AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+ }\r
+ }\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+\r
//\r
// Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
// specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
//\r
// Allocate SMI stacks for all processors.\r
//\r
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
// 2 more pages is allocated for each processor.\r
// | | | |\r
// |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
//\r
- mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
- Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
- ASSERT (Stacks != NULL);\r
- mSmmStackArrayBase = (UINTN)Stacks;\r
- mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
- } else {\r
- mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
- Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
- ASSERT (Stacks != NULL);\r
+ mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
+ }\r
+\r
+ mSmmShadowStackSize = 0;\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ //\r
+ // Append Shadow Stack after normal stack\r
+ //\r
+ // |= Stacks\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
+ // | |\r
+ // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
+ //\r
+ mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
+ }\r
+ }\r
+\r
+ Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
+ ASSERT (Stacks != NULL);\r
+ mSmmStackArrayBase = (UINTN)Stacks;\r
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
+\r
+ DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
+ DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
}\r
\r
//\r
// Set SMI stack for SMM base relocation\r
//\r
- gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
+ PatchInstructionX86 (\r
+ gPatchSmmInitStack,\r
+ (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),\r
+ sizeof (UINTN)\r
+ );\r
\r
//\r
// Initialize IDT\r
//\r
// Initialize MP globals\r
//\r
- Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
+ Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
+\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ SetShadowStack (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ mSmmShadowStackSize\r
+ );\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ SetNotPresentPage (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ EFI_PAGES_TO_SIZE(1)\r
+ );\r
+ }\r
+ }\r
+ }\r
\r
//\r
// Fill in SMM Reserved Regions\r
);\r
ASSERT_EFI_ERROR (Status);\r
\r
+ //\r
+ // Install the SMM Memory Attribute Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEdkiiSmmMemoryAttributeProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMemoryAttribute\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Initialize global buffer for MM MP.\r
+ //\r
+ InitializeDataForMmMp ();\r
+\r
+ //\r
+ // Install the SMM Mp Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEfiMmMpProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMp\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
//\r
// Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
//\r
UINTN Size;\r
EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
- EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
- UINTN SmramRangeCount;\r
UINTN Index;\r
UINT64 MaxSize;\r
BOOLEAN Found;\r
Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
\r
- SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
- ASSERT (SmramRanges != NULL);\r
+ mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
+ ASSERT (mSmmCpuSmramRanges != NULL);\r
\r
- Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
ASSERT_EFI_ERROR (Status);\r
\r
- SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
+ mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
\r
//\r
// Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
//\r
CurrentSmramRange = NULL;\r
- for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
//\r
// Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
//\r
- if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
+ if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
continue;\r
}\r
\r
- if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
- if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
- if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
- MaxSize = SmramRanges[Index].PhysicalSize;\r
- CurrentSmramRange = &SmramRanges[Index];\r
+ if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
+ if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
+ if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
+ MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
+ CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
}\r
}\r
}\r
\r
do {\r
Found = FALSE;\r
- for (Index = 0; Index < SmramRangeCount; Index++) {\r
- if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
- *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
+ *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
+ *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
Found = TRUE;\r
- } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
+ } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
Found = TRUE;\r
}\r
}\r
} while (Found);\r
\r
- FreePool (SmramRanges);\r
DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
}\r
\r
//\r
for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
-\r
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {\r
+ //\r
+ // If this processor does not exist\r
+ //\r
+ continue;\r
+ }\r
//\r
// Acquire Config SMM Code Access Check spin lock. The AP will release the\r
// spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
ASSERT_EFI_ERROR (Status);\r
}\r
- Memory = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));\r
+ Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
UnalignedPages = RealPages - Pages - UnalignedPages;\r
if (UnalignedPages > 0) {\r
//\r