\r
@retval EFI_SUCCESS The register was read from Save State\r
@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
- @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
\r
**/\r
EFI_STATUS\r
\r
@retval EFI_SUCCESS The register was written from Save State\r
@retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
- @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
+ @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
\r
**/\r
EFI_STATUS\r
// Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
//\r
mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
- DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
\r
//\r
// Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
// Make sure AddressEncMask is contained to smallest supported address field.\r
//\r
mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
- DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
+ DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
\r
//\r
// If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
\r
DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
- AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
- if (RegEax > CPUID_EXTENDED_FUNCTION) {\r
+ AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r
AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
}\r
+ } else {\r
+ mCetSupported = FALSE;\r
+ PatchInstructionX86(mPatchCetSupported, mCetSupported, 1);\r
}\r
} else {\r
mCetSupported = FALSE;\r
TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
TileSize = TileDataSize + TileCodeSize - 1;\r
TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
- DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
+ DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
\r
//\r
// If the TileSize is larger than space available for the SMI Handler of\r
Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
}\r
ASSERT (Buffer != NULL);\r
- DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
+ DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
\r
//\r
// Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
ASSERT_EFI_ERROR (Status);\r
mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
\r
- DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
+ DEBUG ((DEBUG_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
Index,\r
(UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
mCpuHotPlugData.SmBase[Index],\r
mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
//\r
- // 2 more pages is allocated for each processor.\r
- // one is guard page and the other is known good stack.\r
+ // SMM Stack Guard Enabled\r
+   // 2 more pages are allocated for each processor, one is a guard page and the other is a known good stack.\r
//\r
- // +-------------------------------------------+-----+-------------------------------------------+\r
- // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
- // +-------------------------------------------+-----+-------------------------------------------+\r
- // | | | |\r
- // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
+ // +--------------------------------------------------+-----+--------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
+ // +--------------------------------------------------+-----+--------------------------------------------------+\r
+ // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r
+ // | | | |\r
+ // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r
//\r
mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
}\r
\r
mSmmShadowStackSize = 0;\r
if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
- //\r
- // Append Shadow Stack after normal stack\r
- //\r
- // |= Stacks\r
- // +--------------------------------------------------+---------------------------------------------------------------+\r
- // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
- // +--------------------------------------------------+---------------------------------------------------------------+\r
- // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|\r
- // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
- // | |\r
- // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
- //\r
mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
+\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // SMM Stack Guard Enabled\r
+ // Append Shadow Stack after normal stack\r
+     // 2 more pages are allocated for each processor, one is a guard page and the other is a known good shadow stack.\r
+ //\r
+ // |= Stacks\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
+ // +--------------------------------------------------+---------------------------------------------------------------+\r
+ // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r
+ // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
+ // | |\r
+ // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
+ //\r
mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
+ } else {\r
+ //\r
+ // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r
+ // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r
+     // 1 more page is allocated for each processor; it is the known good stack.\r
+ //\r
+ //\r
+ // |= Stacks\r
+ // +-------------------------------------+--------------------------------------------------+\r
+ // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r
+ // +-------------------------------------+--------------------------------------------------+\r
+ // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r
+ // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r
+ // | |\r
+ // |<-------------------------------- Processor N ----------------------------------------->|\r
+ //\r
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r
+ mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r
}\r
}\r
\r
GetAcpiS3EnableFlag ();\r
InitSmmS3ResumeState (Cr3);\r
\r
- DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
+ DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
\r
return EFI_SUCCESS;\r
}\r
*SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
*SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
\r
- //\r
- // Extend *SmrrBase/*SmrrSize to include adjacent SMRAM ranges\r
- //\r
do {\r
Found = FALSE;\r
for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
}\r
} while (Found);\r
\r
- DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
+ DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
}\r
\r
/**\r
//\r
SetMemMapAttributes ();\r
\r
- //\r
- // Do not protect memory outside SMRAM when SMM static page table is not enabled.\r
- //\r
- if (mCpuSmmStaticPageTable) {\r
-\r
+ if (IsRestrictedMemoryAccess ()) {\r
//\r
// For outside SMRAM, we only map SMM communication buffer or MMIO.\r
//\r