/** @file\r
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
\r
-Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
},\r
+ NULL, // pointer to Ap Wrapper Func array\r
+  {NULL, NULL},                      // LIST_ENTRY for Tokens.
};\r
\r
CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
SmmWriteSaveState\r
};\r
\r
+///\r
+/// SMM Memory Attribute Protocol instance\r
+///\r
+EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
+ EdkiiSmmGetMemoryAttributes,\r
+ EdkiiSmmSetMemoryAttributes,\r
+ EdkiiSmmClearMemoryAttributes\r
+};\r
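+
+//
+// A minimal consumer sketch (not part of this patch): once the protocol
+// instance above is installed, other SMM drivers can locate it and, for
+// example, mark a buffer read-only. The member prototype shown follows the
+// EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL definition and is an assumption of
+// this sketch, not code added by this change:
+//
+//   EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  *MemoryAttribute;
+//
+//   Status = gSmst->SmmLocateProtocol (
+//                     &gEdkiiSmmMemoryAttributeProtocolGuid,
+//                     NULL,
+//                     (VOID **)&MemoryAttribute
+//                     );
+//   if (!EFI_ERROR (Status)) {
+//     Status = MemoryAttribute->SetMemoryAttributes (
+//                                 MemoryAttribute,
+//                                 BaseAddress,
+//                                 Length,
+//                                 EFI_MEMORY_RO
+//                                 );
+//   }
+//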
+\r
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
\r
//\r
UINTN mSmmStackArrayEnd;\r
UINTN mSmmStackSize;\r
\r
+UINTN mSmmShadowStackSize;\r
+BOOLEAN mCetSupported = TRUE;\r
+\r
UINTN mMaxNumberOfCpus = 1;\r
UINTN mNumberOfCpus = 1;\r
\r
//\r
BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
\r
+//\r
+// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
+//\r
+UINT64 mAddressEncMask = 0;\r
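+
+//
+// When SEV is active this mask is OR'ed into every page-table entry the
+// driver builds, so SMRAM mappings stay encrypted. Illustrative sketch only;
+// PAGE_ATTRIBUTE_BITS is assumed to be this driver's usual present/RW
+// attribute mask:
+//
+//   PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+//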
+\r
//\r
// Spin lock used to serialize setting of SMM Code Access Check feature\r
//\r
SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
\r
+//\r
+// Saved SMM ranges information\r
+//\r
+EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
+UINTN mSmmCpuSmramRangeCount;\r
+\r
+UINT8 mPhysicalAddressBits;\r
+\r
+//\r
+// Control register contents saved for SMM S3 resume state initialization.\r
+//\r
+UINT32 mSmmCr0;\r
+UINT32 mSmmCr4;\r
+\r
/**\r
Initialize IDT to setup exception handlers for SMM.\r
\r
EFI_STATUS Status;\r
BOOLEAN InterruptState;\r
IA32_DESCRIPTOR DxeIdtr;\r
+\r
+ //\r
+  // The IDT contains 32 (rather than the full 256) entries, since only
+  // processor-generated exceptions are handled here.
+ //\r
+ gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
+ //\r
+  // Allocate a page-aligned IDT, because it may later be set read-only.
+ //\r
+ gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
+ ASSERT (gcSmiIdtr.Base != 0);\r
+ ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
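+  //
+  // Worked example: the table allocated above holds 32 descriptors of
+  // sizeof (IA32_IDT_GATE_DESCRIPTOR) bytes each (16 bytes for X64, 8 for
+  // IA32), so on X64 the Limit is 32 * 16 - 1 = 0x1FF and the whole IDT
+  // fits in the single page allocated above.
+  //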
+\r
//\r
// Disable Interrupt and save DXE IDT table\r
//\r
)\r
{\r
UINTN Pe32Data;\r
- EFI_IMAGE_DOS_HEADER *DosHdr;\r
- EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
VOID *PdbPointer;\r
- UINT64 DumpIpAddress;\r
\r
//\r
// Find Image Base\r
//\r
- Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
- while (Pe32Data != 0) {\r
- DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
- if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
- //\r
- // DOS image header is present, so read the PE header after the DOS image header.\r
- //\r
- Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
- //\r
- // Make sure PE header address does not overflow and is less than the initial address.\r
- //\r
- if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
- if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
- //\r
- // It's PE image.\r
- //\r
- break;\r
- }\r
- }\r
- }\r
-\r
- //\r
- // Not found the image base, check the previous aligned address\r
- //\r
- Pe32Data -= SIZE_4KB;\r
- }\r
-\r
- DumpIpAddress = CallerIpAddress;\r
- DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
-\r
+ Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
if (Pe32Data != 0) {\r
+ DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
if (PdbPointer != NULL) {\r
- DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
+ DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
}\r
}\r
}\r
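+
+//
+// Note: PeCoffSearchImageBase () (MdePkg PeCoffGetEntryPointLib) performs
+// the search that the deleted lines above open-coded: starting at the
+// 4KB-aligned address at or below the given address, it steps down one
+// SIZE_4KB page at a time looking for a valid DOS/PE signature pair, and
+// returns 0 when no image base is found.
+//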
if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
return EFI_INVALID_PARAMETER;\r
}\r
+  //
+  // The SpeculationBarrier() call ensures the CpuIndex check above has
+  // completed before any subsequent code executes (bounds-check bypass
+  // mitigation).
+  //
+ SpeculationBarrier ();\r
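+  //
+  // On IA32/X64, BaseLib's SpeculationBarrier() is implemented with LFENCE.
+  // The pattern being defended against (Spectre variant 1) is, in sketch:
+  //
+  //   if (CpuIndex < Limit) {      // check can be bypassed speculatively...
+  //     Value = Table[CpuIndex];   // ...so fence before dependent loads
+  //   }
+  //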
\r
//\r
// Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
AsmWriteIdtr (&gcSmiIdtr);\r
ApicId = GetApicId ();\r
\r
- ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
+ ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
\r
for (Index = 0; Index < mNumberOfCpus; Index++) {\r
if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
//\r
// Patch ASM code template with current CR0, CR3, and CR4 values\r
//\r
- gSmmCr0 = (UINT32)AsmReadCr0 ();\r
- gSmmCr3 = (UINT32)AsmReadCr3 ();\r
- gSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ mSmmCr0 = (UINT32)AsmReadCr0 ();\r
+ PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
+ PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
+ mSmmCr4 = (UINT32)AsmReadCr4 ();\r
+ PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);\r
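+
+  //
+  // PatchInstructionX86 (MdePkg BaseLib) stores the patch value into the
+  // ValueSize bytes immediately preceding the patch label, i.e. into the
+  // immediate operand of a template instruction in the SMI entry assembly.
+  // A sketch of the NASM side (label name is this driver's):
+  //
+  //   mov eax, strict dword 0     ; placeholder immediate, patched at boot
+  //   ASM_PFX(gPatchSmmCr0):      ; label right after the 4-byte operand
+  //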
\r
//\r
// Patch GDTR for SMM base relocation\r
{\r
GetAcpiCpuData ();\r
\r
+ //\r
+  // Cache a copy of the UEFI memory map before the SMM profiling feature starts.
+ //\r
+ GetUefiMemoryMap ();\r
+\r
//\r
// Set SMM ready to lock flag and return\r
//\r
UINT8 *Stacks;\r
VOID *Registration;\r
UINT32 RegEax;\r
+ UINT32 RegEbx;\r
+ UINT32 RegEcx;\r
UINT32 RegEdx;\r
UINTN FamilyId;\r
UINTN ModelId;\r
UINT32 Cr3;\r
\r
+ //\r
+ // Initialize address fixup\r
+ //\r
+ PiSmmCpuSmmInitFixupAddress ();\r
+ PiSmmCpuSmiEntryFixupAddress ();\r
+\r
//\r
// Initialize Debug Agent to support source level debug in SMM code\r
//\r
EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
);\r
\r
- //\r
- // Fix segment address of the long-mode-switch jump\r
- //\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
- }\r
-\r
//\r
// Find out SMRR Base and SMRR Size\r
//\r
mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
\r
+ //\r
+ // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
+  // Make sure AddressEncMask is confined to the smallest supported address field.
+ //\r
+ mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
+ DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
+\r
//\r
// If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
//\r
}\r
}\r
\r
+ DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
+ if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
+    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
+    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
+ DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
+ DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
+ DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
+ if ((RegEcx & CPUID_CET_SS) == 0) {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
+ if (mCetSupported) {\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
+ AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
+ DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+      AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
+ DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
+ }\r
+ }\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
+ }\r
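+
+  //
+  // For reference, the masks tested above correspond to
+  // CPUID.(EAX=07H,ECX=0) in the Intel SDM; the exact macro definitions are
+  // assumed to live in this driver's headers:
+  //
+  //   #define CPUID_CET_SS   BIT7    // ECX[7]  - CET shadow stack
+  //   #define CPUID_CET_IBT  BIT20   // EDX[20] - CET indirect branch tracking
+  //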
+\r
//\r
// Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
- // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
- // is rounded up to nearest power of 2.\r
+  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
+  // This size is rounded up to the nearest power of 2.
//\r
TileCodeSize = GetSmiHandlerSize ();\r
TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
- TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);\r
+ TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
TileSize = TileDataSize + TileCodeSize - 1;\r
TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
\r
//\r
- // If the TileSize is larger than space available for the SMI Handler of CPU[i],\r
- // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1] and the SMRAM Save State Map of CPU[i+1],\r
- // the ASSERT(). If this ASSERT() is triggered, then the SMI Handler size must be\r
- // reduced.\r
+ // If the TileSize is larger than space available for the SMI Handler of\r
+ // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
+ // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
+ // the SMI Handler size must be reduced or the size of the extra CPU specific\r
+ // context must be reduced.\r
//\r
ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
\r
//\r
BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
if ((FamilyId == 4) || (FamilyId == 5)) {\r
- Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB);\r
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
} else {\r
- Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB);\r
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
}\r
ASSERT (Buffer != NULL);\r
DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
//\r
// Allocate SMI stacks for all processors.\r
//\r
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
-  // 2 more pages is allocated for each processor.
+  // 2 more pages are allocated for each processor.
// | | | |\r
// |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
//\r
- mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
- Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
- ASSERT (Stacks != NULL);\r
- mSmmStackArrayBase = (UINTN)Stacks;\r
- mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
- } else {\r
- mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
- Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
- ASSERT (Stacks != NULL);\r
+ mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
+ }\r
+\r
+ mSmmShadowStackSize = 0;\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ //\r
+ // Append Shadow Stack after normal stack\r
+ //\r
+ // |= Stacks\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
+ // | |\r
+ // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
+ //\r
+ mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
+ }\r
+ }\r
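+
+  //
+  // Per the diagram above, each CPU owns one (mSmmStackSize +
+  // mSmmShadowStackSize) sized slot, so the per-CPU bases follow as
+  // (illustrative arithmetic only):
+  //
+  //   StackBase       = (UINTN)Stacks + Index * (mSmmStackSize + mSmmShadowStackSize)
+  //   ShadowStackBase = StackBase + mSmmStackSize
+  //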
+\r
+ Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
+ ASSERT (Stacks != NULL);\r
+ mSmmStackArrayBase = (UINTN)Stacks;\r
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
+\r
+ DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
+ DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
}\r
\r
//\r
// Set SMI stack for SMM base relocation\r
//\r
- gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
+ PatchInstructionX86 (\r
+ gPatchSmmInitStack,\r
+ (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),\r
+ sizeof (UINTN)\r
+ );\r
\r
//\r
// Initialize IDT\r
//\r
SmmCpuFeaturesSmmRelocationComplete ();\r
\r
+ DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
+\r
//\r
// SMM Time initialization\r
//\r
//\r
// Initialize MP globals\r
//\r
- Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
+ Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
+\r
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ SetShadowStack (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ mSmmShadowStackSize\r
+ );\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ SetNotPresentPage (\r
+ Cr3,\r
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
+ EFI_PAGES_TO_SIZE(1)\r
+ );\r
+ }\r
+ }\r
+ }\r
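+
+  //
+  // Per the Intel SDM, a shadow-stack page is one whose leaf paging entry
+  // is read-only with the dirty flag set, so SetShadowStack() above is
+  // expected to leave each mapped page as (illustrative only; IA32_PG_*
+  // names are this driver's page-table flag definitions):
+  //
+  //   Pte &= ~(UINT64)IA32_PG_RW;   // R/W = 0: no ordinary writes allowed
+  //   Pte |= IA32_PG_D;             // D = 1: qualifies the page for shadow-stack use
+  //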
\r
//\r
// Fill in SMM Reserved Regions\r
);\r
ASSERT_EFI_ERROR (Status);\r
\r
+ //\r
+ // Install the SMM Memory Attribute Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEdkiiSmmMemoryAttributeProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMemoryAttribute\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ //\r
+ // Initialize global buffer for MM MP.\r
+ //\r
+ InitializeDataForMmMp ();\r
+\r
+ //\r
+ // Install the SMM Mp Protocol into SMM protocol database\r
+ //\r
+ Status = gSmst->SmmInstallProtocolInterface (\r
+ &mSmmCpuHandle,\r
+ &gEfiMmMpProtocolGuid,\r
+ EFI_NATIVE_INTERFACE,\r
+ &mSmmMp\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
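+
+  //
+  // A minimal consumer sketch (not part of this patch): after this install,
+  // other MM code can locate gEfiMmMpProtocolGuid and query the processor
+  // count. The prototype follows the PI EFI_MM_MP_PROTOCOL definition and
+  // is an assumption of this sketch:
+  //
+  //   EFI_MM_MP_PROTOCOL  *MmMp;
+  //   UINTN               NumberOfProcessors;
+  //
+  //   Status = gSmst->SmmLocateProtocol (&gEfiMmMpProtocolGuid, NULL, (VOID **)&MmMp);
+  //   if (!EFI_ERROR (Status)) {
+  //     Status = MmMp->GetNumberOfProcessors (MmMp, &NumberOfProcessors);
+  //   }
+  //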
+\r
//\r
// Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
//\r
UINTN Size;\r
EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
- EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
- UINTN SmramRangeCount;\r
UINTN Index;\r
UINT64 MaxSize;\r
BOOLEAN Found;\r
Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
\r
- SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
- ASSERT (SmramRanges != NULL);\r
+ mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
+ ASSERT (mSmmCpuSmramRanges != NULL);\r
\r
- Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
ASSERT_EFI_ERROR (Status);\r
\r
- SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
+ mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
\r
//\r
-  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
+  // Find the largest SMRAM range between 1MB and SMRR_MAX_ADDRESS that is at least 256K - 4K in size
//\r
CurrentSmramRange = NULL;\r
- for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
//\r
// Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
//\r
- if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
+ if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
continue;\r
}\r
\r
- if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
- if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
- if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
- MaxSize = SmramRanges[Index].PhysicalSize;\r
- CurrentSmramRange = &SmramRanges[Index];\r
+ if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
+ if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
+ if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
+ MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
+ CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
}\r
}\r
}\r
\r
do {\r
Found = FALSE;\r
- for (Index = 0; Index < SmramRangeCount; Index++) {\r
- if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
- *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
+ if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
+ *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
+ *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
Found = TRUE;\r
- } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
- *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
+ } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
Found = TRUE;\r
}\r
}\r
//\r
for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
-\r
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {\r
+ //\r
+ // If this processor does not exist\r
+ //\r
+ continue;\r
+ }\r
//\r
// Acquire Config SMM Code Access Check spin lock. The AP will release the\r
// spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
return AllocatePages (Pages);\r
}\r
\r
+/**\r
+ Allocate pages for code.\r
+\r
+ @param[in] Pages Number of pages to be allocated.\r
+\r
+ @return Allocated memory.\r
+**/\r
+VOID *\r
+AllocateCodePages (\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ EFI_PHYSICAL_ADDRESS Memory;\r
+\r
+ if (Pages == 0) {\r
+ return NULL;\r
+ }\r
+\r
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
+ if (EFI_ERROR (Status)) {\r
+ return NULL;\r
+ }\r
+ return (VOID *) (UINTN) Memory;\r
+}\r
+\r
+/**\r
+ Allocate aligned pages for code.\r
+\r
+ @param[in] Pages Number of pages to be allocated.\r
+ @param[in] Alignment The requested alignment of the allocation.\r
+ Must be a power of two.\r
+ If Alignment is zero, then byte alignment is used.\r
+\r
+ @return Allocated memory.\r
+**/\r
+VOID *\r
+AllocateAlignedCodePages (\r
+ IN UINTN Pages,\r
+ IN UINTN Alignment\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ EFI_PHYSICAL_ADDRESS Memory;\r
+ UINTN AlignedMemory;\r
+ UINTN AlignmentMask;\r
+ UINTN UnalignedPages;\r
+ UINTN RealPages;\r
+\r
+ //\r
+ // Alignment must be a power of two or zero.\r
+ //\r
+ ASSERT ((Alignment & (Alignment - 1)) == 0);\r
+\r
+ if (Pages == 0) {\r
+ return NULL;\r
+ }\r
+ if (Alignment > EFI_PAGE_SIZE) {\r
+ //\r
+ // Calculate the total number of pages since alignment is larger than page size.\r
+ //\r
+ AlignmentMask = Alignment - 1;\r
+ RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
+ //\r
+ // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
+ //\r
+ ASSERT (RealPages > Pages);\r
+\r
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
+ if (EFI_ERROR (Status)) {\r
+ return NULL;\r
+ }\r
+ AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
+ UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
+ if (UnalignedPages > 0) {\r
+ //\r
+ // Free first unaligned page(s).\r
+ //\r
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
+ ASSERT_EFI_ERROR (Status);\r
+ }\r
+ Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
+ UnalignedPages = RealPages - Pages - UnalignedPages;\r
+ if (UnalignedPages > 0) {\r
+ //\r
+ // Free last unaligned page(s).\r
+ //\r
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
+ ASSERT_EFI_ERROR (Status);\r
+ }\r
+ } else {\r
+ //\r
+ // Do not over-allocate pages in this case.\r
+ //\r
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
+ if (EFI_ERROR (Status)) {\r
+ return NULL;\r
+ }\r
+ AlignedMemory = (UINTN) Memory;\r
+ }\r
+ return (VOID *) AlignedMemory;\r
+}\r
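+
+//
+// Worked example with assumed inputs Pages = 8 (32KB) and Alignment =
+// SIZE_32KB (0x8000):
+//
+//   RealPages     = 8 + EFI_SIZE_TO_PAGES (0x8000)  = 16
+//   Memory        = 0x12345000 (say; only 4KB aligned)
+//   AlignedMemory = (0x12345000 + 0x7FFF) & ~0x7FFF = 0x12348000
+//   head trim     = EFI_SIZE_TO_PAGES (0x3000)      = 3 pages freed
+//   tail trim     = 16 - 8 - 3                      = 5 pages freed
+//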
+\r
/**\r
Perform the remaining tasks.\r
\r
-  // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
+  // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disabled.
//\r
InitPaging ();\r
+\r
+ //\r
+ // Mark critical region to be read-only in page table\r
+ //\r
+ SetMemMapAttributes ();\r
+\r
+ //\r
+ // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
+ //\r
+ SetUefiMemMapAttributes ();\r
+\r
+ //\r
+ // Set page table itself to be read-only\r
+ //\r
+ SetPageTableAttributes ();\r
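+  //
+  // Note: SetPageTableAttributes() must run after the two calls above,
+  // because those calls still modify the very page table that it marks
+  // read-only.
+  //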
+\r
//\r
// Configure SMM Code Access Check feature if available.\r
//\r