/** @file\r
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
\r
-Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
-\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
NULL, // Pointer to Operation array\r
NULL, // Pointer to CpuSaveStateSize array\r
NULL, // Pointer to CpuSaveState array\r
- { {0} }, // SmmReservedSmramRegion\r
+ {\r
+ { 0 }\r
+ }, // SmmReservedSmramRegion\r
{\r
SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
},\r
+ NULL, // pointer to Ap Wrapper Func array\r
+ { NULL, NULL }, // List_Entry for Tokens.\r
};\r
\r
-CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
+CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
0, // Array Length of SmBase and APIC ID\r
NULL, // Pointer to APIC ID array\r
///\r
/// SMM CPU Protocol instance\r
///\r
-EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
+EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
SmmReadSaveState,\r
SmmWriteSaveState\r
};\r
\r
-EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
+///\r
+/// SMM Memory Attribute Protocol instance\r
+///\r
+EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
+ EdkiiSmmGetMemoryAttributes,\r
+ EdkiiSmmSetMemoryAttributes,\r
+ EdkiiSmmClearMemoryAttributes\r
+};\r
+\r
+EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
\r
//\r
// SMM stack information\r
//\r
-UINTN mSmmStackArrayBase;\r
-UINTN mSmmStackArrayEnd;\r
-UINTN mSmmStackSize;\r
+UINTN mSmmStackArrayBase;\r
+UINTN mSmmStackArrayEnd;\r
+UINTN mSmmStackSize;\r
\r
-UINTN mMaxNumberOfCpus = 1;\r
-UINTN mNumberOfCpus = 1;\r
+UINTN mSmmShadowStackSize;\r
+BOOLEAN mCetSupported = TRUE;\r
+\r
+UINTN mMaxNumberOfCpus = 1;\r
+UINTN mNumberOfCpus = 1;\r
\r
//\r
// SMM ready to lock flag\r
//\r
-BOOLEAN mSmmReadyToLock = FALSE;\r
+BOOLEAN mSmmReadyToLock = FALSE;\r
\r
//\r
// Global used to cache PCD for SMM Code Access Check enable\r
//\r
-BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
+BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
\r
//\r
// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
//\r
-UINT64 mAddressEncMask = 0;\r
+UINT64 mAddressEncMask = 0;\r
\r
//\r
// Spin lock used to serialize setting of SMM Code Access Check feature\r
//\r
-SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
+SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
+\r
+//\r
+// Saved SMM ranges information\r
+//\r
+EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
+UINTN mSmmCpuSmramRangeCount;\r
+\r
+UINT8 mPhysicalAddressBits;\r
+\r
+//\r
+// Control register contents saved for SMM S3 resume state initialization.\r
+//\r
+UINT32 mSmmCr0;\r
+UINT32 mSmmCr4;\r
\r
/**\r
Initialize IDT to setup exception handlers for SMM.\r
VOID\r
)\r
{\r
- EFI_STATUS Status;\r
- BOOLEAN InterruptState;\r
- IA32_DESCRIPTOR DxeIdtr;\r
+ EFI_STATUS Status;\r
+ BOOLEAN InterruptState;\r
+ IA32_DESCRIPTOR DxeIdtr;\r
\r
//\r
// There are 32 (not 255) entries in it since only processor\r
// generated exceptions will be handled.\r
//\r
- gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
+ gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
//\r
// Allocate page aligned IDT, because it might be set as read only.\r
//\r
- gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
+ gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));\r
ASSERT (gcSmiIdtr.Base != 0);\r
ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
\r
//\r
// Restore DXE IDT table and CPU interrupt\r
//\r
- AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
+ AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);\r
SetInterruptState (InterruptState);\r
}\r
\r
**/\r
VOID\r
DumpModuleInfoByIp (\r
- IN UINTN CallerIpAddress\r
+ IN UINTN CallerIpAddress\r
)\r
{\r
- UINTN Pe32Data;\r
- EFI_IMAGE_DOS_HEADER *DosHdr;\r
- EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
- VOID *PdbPointer;\r
- UINT64 DumpIpAddress;\r
+ UINTN Pe32Data;\r
+ VOID *PdbPointer;\r
\r
//\r
// Find Image Base\r
//\r
- Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
- while (Pe32Data != 0) {\r
- DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
- if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
- //\r
- // DOS image header is present, so read the PE header after the DOS image header.\r
- //\r
- Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
- //\r
- // Make sure PE header address does not overflow and is less than the initial address.\r
- //\r
- if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
- if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
- //\r
- // It's PE image.\r
- //\r
- break;\r
- }\r
- }\r
- }\r
-\r
- //\r
- // Not found the image base, check the previous aligned address\r
- //\r
- Pe32Data -= SIZE_4KB;\r
- }\r
-\r
- DumpIpAddress = CallerIpAddress;\r
- DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
-\r
+ Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
if (Pe32Data != 0) {\r
- PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
+ DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));\r
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);\r
if (PdbPointer != NULL) {\r
- DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
+ DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
}\r
}\r
}\r
\r
@retval EFI_SUCCESS The register was read from Save State\r
@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
- @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
\r
**/\r
EFI_STATUS\r
EFIAPI\r
SmmReadSaveState (\r
- IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
- IN UINTN Width,\r
- IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
- IN UINTN CpuIndex,\r
- OUT VOID *Buffer\r
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
+ IN UINTN Width,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN CpuIndex,\r
+ OUT VOID *Buffer\r
)\r
{\r
EFI_STATUS Status;\r
return EFI_INVALID_PARAMETER;\r
}\r
\r
+ //\r
+ // The SpeculationBarrier() call here is to ensure the above check for the\r
+ // CpuIndex has been completed before the execution of subsequent codes.\r
+ //\r
+ SpeculationBarrier ();\r
+\r
//\r
// Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
//\r
if (Width != sizeof (UINT64)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+\r
//\r
// If the processor is in SMM at the time the SMI occurred,\r
// the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
if (Status == EFI_UNSUPPORTED) {\r
Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
}\r
+\r
return Status;\r
}\r
\r
\r
@retval EFI_SUCCESS The register was written from Save State\r
@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
- @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
+ @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
\r
**/\r
EFI_STATUS\r
EFIAPI\r
SmmWriteSaveState (\r
- IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
- IN UINTN Width,\r
- IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
- IN UINTN CpuIndex,\r
- IN CONST VOID *Buffer\r
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
+ IN UINTN Width,\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
+ IN UINTN CpuIndex,\r
+ IN CONST VOID *Buffer\r
)\r
{\r
EFI_STATUS Status;\r
if (Status == EFI_UNSUPPORTED) {\r
Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
}\r
+\r
return Status;\r
}\r
\r
-\r
/**\r
C function for SMI handler. To change all processor's SMMBase Register.\r
\r
VOID\r
)\r
{\r
- UINT32 ApicId;\r
- UINTN Index;\r
+ UINT32 ApicId;\r
+ UINTN Index;\r
\r
//\r
// Update SMM IDT entries' code segment and load IDT\r
return;\r
}\r
}\r
+\r
ASSERT (FALSE);\r
}\r
\r
//\r
// Patch ASM code template with current CR0, CR3, and CR4 values\r
//\r
- gSmmCr0 = (UINT32)AsmReadCr0 ();\r
- gSmmCr3 = (UINT32)AsmReadCr3 ();\r
- gSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ mSmmCr0 = (UINT32)AsmReadCr0 ();\r
+ PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
+ PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
+ mSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);\r
\r
//\r
// Patch GDTR for SMM base relocation\r
gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
\r
- U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
+ U8Ptr = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
\r
//\r
//\r
// Wait for this AP to finish its 1st SMI\r
//\r
- while (!mRebased[Index]);\r
+ while (!mRebased[Index]) {\r
+ }\r
} else {\r
//\r
// BSP will be Relocated later\r
//\r
// Wait for the BSP to finish its 1st SMI\r
//\r
- while (!mRebased[BspIndex]);\r
+ while (!mRebased[BspIndex]) {\r
+ }\r
\r
//\r
// Restore contents at address 0x38000\r
IN EFI_SYSTEM_TABLE *SystemTable\r
)\r
{\r
- EFI_STATUS Status;\r
- EFI_MP_SERVICES_PROTOCOL *MpServices;\r
- UINTN NumberOfEnabledProcessors;\r
- UINTN Index;\r
- VOID *Buffer;\r
- UINTN BufferPages;\r
- UINTN TileCodeSize;\r
- UINTN TileDataSize;\r
- UINTN TileSize;\r
- UINT8 *Stacks;\r
- VOID *Registration;\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINTN FamilyId;\r
- UINTN ModelId;\r
- UINT32 Cr3;\r
+ EFI_STATUS Status;\r
+ EFI_MP_SERVICES_PROTOCOL *MpServices;\r
+ UINTN NumberOfEnabledProcessors;\r
+ UINTN Index;\r
+ VOID *Buffer;\r
+ UINTN BufferPages;\r
+ UINTN TileCodeSize;\r
+ UINTN TileDataSize;\r
+ UINTN TileSize;\r
+ UINT8 *Stacks;\r
+ VOID *Registration;\r
+ UINT32 RegEax;\r
+ UINT32 RegEbx;\r
+ UINT32 RegEcx;\r
+ UINT32 RegEdx;\r
+ UINTN FamilyId;\r
+ UINTN ModelId;\r
+ UINT32 Cr3;\r
+\r
+ //\r
+ // Initialize address fixup\r
+ //\r
+ PiSmmCpuSmmInitFixupAddress ();\r
+ PiSmmCpuSmiEntryFixupAddress ();\r
\r
//\r
// Initialize Debug Agent to support source level debug in SMM code\r
EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
);\r
\r
- //\r
- // Fix segment address of the long-mode-switch jump\r
- //\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
- }\r
-\r
//\r
// Find out SMRR Base and SMRR Size\r
//\r
// If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r
// A constant BSP index makes no sense because it may be hot removed.\r
//\r
- DEBUG_CODE (\r
- if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
+ DEBUG_CODE_BEGIN ();\r
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
+ ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
+ }\r
\r
- ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
- }\r
- );\r
+ DEBUG_CODE_END ();\r
\r
//\r
// Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
//\r
mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
- DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
\r
//\r
// Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
// Make sure AddressEncMask is contained to smallest supported address field.\r
//\r
mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
- DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
+ DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
\r
//\r
// If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
} else {\r
mMaxNumberOfCpus = mNumberOfCpus;\r
}\r
+\r
gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
\r
//\r
//\r
AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
FamilyId = (RegEax >> 8) & 0xf;\r
- ModelId = (RegEax >> 4) & 0xf;\r
- if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
+ ModelId = (RegEax >> 4) & 0xf;\r
+ if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {\r
ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
}\r
\r
if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
}\r
+\r
//\r
// Determine the mode of the CPU at the time an SMI occurs\r
// Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
if ((RegEdx & BIT29) != 0) {\r
mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
}\r
+\r
if (FamilyId == 0x06) {\r
- if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
+ if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {\r
mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
}\r
}\r
\r
+ DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
+ if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
+ AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
+ DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
+ DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
+ DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
+ if ((RegEcx & CPUID_CET_SS) == 0) {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+\r
+ if (mCetSupported) {\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+ }\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+\r
//\r
// Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
// specific context starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
// This size is rounded up to nearest power of 2.\r
//\r
TileCodeSize = GetSmiHandlerSize ();\r
- TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
+ TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);\r
TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
- TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
- TileSize = TileDataSize + TileCodeSize - 1;\r
- TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
- DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
+ TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);\r
+ TileSize = TileDataSize + TileCodeSize - 1;\r
+ TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
+ DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
\r
//\r
// If the TileSize is larger than space available for the SMI Handler of\r
} else {\r
Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
}\r
+\r
ASSERT (Buffer != NULL);\r
- DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
+ DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));\r
\r
//\r
// Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
// size for each CPU in the platform\r
//\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
- gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
+ mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
+ gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);\r
gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
- gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
\r
if (Index < mNumberOfCpus) {\r
Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
ASSERT_EFI_ERROR (Status);\r
mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
\r
- DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
+ DEBUG ((\r
+ DEBUG_INFO,\r
+ "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
Index,\r
(UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
mCpuHotPlugData.SmBase[Index],\r
));\r
} else {\r
gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
- mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
+ mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
}\r
}\r
\r
//\r
// Allocate SMI stacks for all processors.\r
//\r
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
- // 2 more pages is allocated for each processor.\r
- // one is guard page and the other is known good stack.\r
+ // SMM Stack Guard Enabled\r
+ // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.\r
//\r
- // +-------------------------------------------+-----+-------------------------------------------+\r
- // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
- // +-------------------------------------------+-----+-------------------------------------------+\r
- // | | | |\r
- // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
+ // +--------------------------------------------------+-----+--------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
+ // +--------------------------------------------------+-----+--------------------------------------------------+\r
+ // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r
+ // | | | |\r
+ // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r
//\r
- mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
- Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
- ASSERT (Stacks != NULL);\r
- mSmmStackArrayBase = (UINTN)Stacks;\r
- mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
- } else {\r
- mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
- Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
- ASSERT (Stacks != NULL);\r
+ mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
+ }\r
+\r
+ mSmmShadowStackSize = 0;\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // SMM Stack Guard Enabled\r
+ // Append Shadow Stack after normal stack\r
+ // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.\r
+ //\r
+ // |= Stacks\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
+ // | |\r
+ // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
+ //\r
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
+ } else {\r
+ //\r
+ // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r
+ // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r
+ // 1 more page is allocated for each processor; it is a known good stack.
+ //\r
+ //\r
+ // |= Stacks\r
+ // +-------------------------------------+--------------------------------------------------+\r
+ // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r
+ // +-------------------------------------+--------------------------------------------------+\r
+ // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r
+ // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r
+ // | |\r
+ // |<-------------------------------- Processor N ----------------------------------------->|\r
+ //\r
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r
+ mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r
+ }\r
+ }\r
+\r
+ Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
+ ASSERT (Stacks != NULL);\r
+ mSmmStackArrayBase = (UINTN)Stacks;\r
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
+\r
+ DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
+ DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
}\r
\r
//\r
// Set SMI stack for SMM base relocation\r
//\r
- gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
+ PatchInstructionX86 (\r
+ gPatchSmmInitStack,\r
+ (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),\r
+ sizeof (UINTN)\r
+ );\r
\r
//\r
// Initialize IDT\r
//\r
// Initialize MP globals\r
//\r
- Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
+ Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
+\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ SetShadowStack (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ mSmmShadowStackSize\r
+ );\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ SetNotPresentPage (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ EFI_PAGES_TO_SIZE (1)\r
+ );\r
+ }\r
+ }\r
+ }\r
\r
//\r
// Fill in SMM Reserved Regions\r
//\r
Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
&gSmmCpuPrivate->SmmCpuHandle,\r
- &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
+ &gEfiSmmConfigurationProtocolGuid,\r
+ &gSmmCpuPrivate->SmmConfiguration,\r
NULL\r
);\r
ASSERT_EFI_ERROR (Status);\r
);\r
ASSERT_EFI_ERROR (Status);\r
\r
+ //\r
+ // Install the SMM Memory Attribute Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEdkiiSmmMemoryAttributeProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMemoryAttribute\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Initialize global buffer for MM MP.\r
+ //\r
+ InitializeDataForMmMp ();\r
+\r
+ //\r
+ // Install the SMM Mp Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEfiMmMpProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMp\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
//\r
// Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
//\r
GetAcpiS3EnableFlag ();\r
InitSmmS3ResumeState (Cr3);\r
\r
- DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
+ DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
\r
return EFI_SUCCESS;\r
}\r
**/\r
VOID\r
FindSmramInfo (\r
- OUT UINT32 *SmrrBase,\r
- OUT UINT32 *SmrrSize\r
+ OUT UINT32 *SmrrBase,\r
+ OUT UINT32 *SmrrSize\r
)\r
{\r
- EFI_STATUS Status;\r
- UINTN Size;\r
- EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
- EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
- EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
- UINTN SmramRangeCount;\r
- UINTN Index;\r
- UINT64 MaxSize;\r
- BOOLEAN Found;\r
+ EFI_STATUS Status;\r
+ UINTN Size;\r
+ EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
+ EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
+ UINTN Index;\r
+ UINT64 MaxSize;\r
+ BOOLEAN Found;\r
\r
//\r
// Get SMM Access Protocol\r
//\r
// Get SMRAM information\r
//\r
- Size = 0;\r
+ Size = 0;\r
Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
\r
- SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
- ASSERT (SmramRanges != NULL);\r
+ mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
+ ASSERT (mSmmCpuSmramRanges != NULL);\r
\r
- Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
ASSERT_EFI_ERROR (Status);\r
\r
- SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
+ mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
\r
//\r
// Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
//\r
CurrentSmramRange = NULL;\r
- for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
//\r
// Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
//\r
- if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
+ if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
continue;\r
}\r
\r
- if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
- if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
- if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
- MaxSize = SmramRanges[Index].PhysicalSize;\r
- CurrentSmramRange = &SmramRanges[Index];\r
+ if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
+ if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
+ if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
+ MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
+ CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
}\r
}\r
}\r
\r
do {\r
Found = FALSE;\r
- for (Index = 0; Index < SmramRangeCount; Index++) {\r
- if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
- *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
- Found = TRUE;\r
- } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
- Found = TRUE;\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&\r
+ (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))\r
+ {\r
+ *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
+ Found = TRUE;\r
+ } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
+ Found = TRUE;\r
}\r
}\r
} while (Found);\r
\r
- FreePool (SmramRanges);\r
- DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
+ DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
}\r
\r
/**\r
//\r
for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {\r
+ //\r
+ // If this processor does not exist\r
+ //\r
+ continue;\r
+ }\r
\r
//\r
// Acquire Config SMM Code Access Check spin lock. The AP will release the\r
**/\r
VOID *\r
AllocatePageTableMemory (\r
- IN UINTN Pages\r
+ IN UINTN Pages\r
)\r
{\r
VOID *Buffer;\r
if (Buffer != NULL) {\r
return Buffer;\r
}\r
+\r
return AllocatePages (Pages);\r
}\r
\r
**/\r
VOID *\r
AllocateCodePages (\r
- IN UINTN Pages\r
+ IN UINTN Pages\r
)\r
{\r
EFI_STATUS Status;\r
if (EFI_ERROR (Status)) {\r
return NULL;\r
}\r
- return (VOID *) (UINTN) Memory;\r
+\r
+ return (VOID *)(UINTN)Memory;\r
}\r
\r
/**\r
**/\r
VOID *\r
AllocateAlignedCodePages (\r
- IN UINTN Pages,\r
- IN UINTN Alignment\r
+ IN UINTN Pages,\r
+ IN UINTN Alignment\r
)\r
{\r
EFI_STATUS Status;\r
if (Pages == 0) {\r
return NULL;\r
}\r
+\r
if (Alignment > EFI_PAGE_SIZE) {\r
//\r
// Calculate the total number of pages since alignment is larger than page size.\r
//\r
- AlignmentMask = Alignment - 1;\r
- RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
+ AlignmentMask = Alignment - 1;\r
+ RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
//\r
// Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
//\r
ASSERT (RealPages > Pages);\r
\r
- Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
if (EFI_ERROR (Status)) {\r
return NULL;\r
}\r
- AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
- UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
+\r
+ AlignedMemory = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;\r
+ UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);\r
if (UnalignedPages > 0) {\r
//\r
// Free first unaligned page(s).\r
Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
ASSERT_EFI_ERROR (Status);\r
}\r
+\r
Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
UnalignedPages = RealPages - Pages - UnalignedPages;\r
if (UnalignedPages > 0) {\r
if (EFI_ERROR (Status)) {\r
return NULL;\r
}\r
- AlignedMemory = (UINTN) Memory;\r
+\r
+ AlignedMemory = (UINTN)Memory;\r
}\r
- return (VOID *) AlignedMemory;\r
+\r
+ return (VOID *)AlignedMemory;\r
}\r
\r
/**\r
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
SmmProfileStart ();\r
}\r
+\r
//\r
// Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
//\r
//\r
SetMemMapAttributes ();\r
\r
- //\r
- // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
- //\r
- SetUefiMemMapAttributes ();\r
+ if (IsRestrictedMemoryAccess ()) {\r
+ //\r
+ // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
+ //\r
+ SetUefiMemMapAttributes ();\r
\r
- //\r
- // Set page table itself to be read-only\r
- //\r
- SetPageTableAttributes ();\r
+ //\r
+ // Set page table itself to be read-only\r
+ //\r
+ SetPageTableAttributes ();\r
+ }\r
\r
//\r
// Configure SMM Code Access Check feature if available.\r