#include <PiSmm.h>\r
#include <Library/SmmCpuFeaturesLib.h>\r
#include <Library/BaseLib.h>\r
-#include <Library/MtrrLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
#include <Library/PcdLib.h>\r
#include <Library/MemoryAllocationLib.h>\r
+#include <Library/SmmServicesTableLib.h>\r
#include <Library/DebugLib.h>\r
-#include <Register/Cpuid.h>\r
-#include <Register/SmramSaveStateMap.h>\r
+#include <Register/QemuSmramSaveStateMap.h>\r
\r
//\r
-// Machine Specific Registers (MSRs)\r
+// EFER register LMA bit\r
//\r
-#define SMM_FEATURES_LIB_IA32_MTRR_CAP 0x0FE\r
-#define SMM_FEATURES_LIB_IA32_FEATURE_CONTROL 0x03A\r
-#define SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE 0x1F2\r
-#define SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK 0x1F3\r
-#define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE 0x0A0\r
-#define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK 0x0A1\r
-#define EFI_MSR_SMRR_MASK 0xFFFFF000\r
-#define EFI_MSR_SMRR_PHYS_MASK_VALID BIT11\r
-#define SMM_FEATURES_LIB_SMM_FEATURE_CONTROL 0x4E0\r
-\r
-//\r
-// MSRs required for configuration of SMM Code Access Check\r
-//\r
-#define SMM_FEATURES_LIB_IA32_MCA_CAP 0x17D\r
-#define SMM_CODE_ACCESS_CHK_BIT BIT58\r
-\r
-//\r
-// Set default value to assume SMRR is not supported\r
-//\r
-BOOLEAN mSmrrSupported = FALSE;\r
-\r
-//\r
-// Set default value to assume MSR_SMM_FEATURE_CONTROL is not supported\r
-//\r
-BOOLEAN mSmmFeatureControlSupported = FALSE;\r
-\r
-//\r
-// Set default value to assume IA-32 Architectural MSRs are used\r
-//\r
-UINT32 mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE;\r
-UINT32 mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK;\r
-\r
-//\r
-// Set default value to assume MTRRs need to be configured on each SMI\r
-//\r
-BOOLEAN mNeedConfigureMtrrs = TRUE;\r
-\r
-//\r
-// Array for state of SMRR enable on all CPUs\r
-//\r
-BOOLEAN *mSmrrEnabled;\r
+#define LMA BIT10\r
\r
/**\r
The constructor function\r
IN EFI_SYSTEM_TABLE *SystemTable\r
)\r
{\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINTN FamilyId;\r
- UINTN ModelId;\r
-\r
- //\r
- // Retrieve CPU Family and Model\r
- //\r
- AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
- FamilyId = (RegEax >> 8) & 0xf;\r
- ModelId = (RegEax >> 4) & 0xf;\r
- if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
- ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
- }\r
-\r
- //\r
- // Check CPUID(CPUID_VERSION_INFO).EDX[12] for MTRR capability\r
- //\r
- if ((RegEdx & BIT12) != 0) {\r
- //\r
- // Check MTRR_CAP MSR bit 11 for SMRR support\r
- //\r
- if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MTRR_CAP) & BIT11) != 0) {\r
- mSmrrSupported = TRUE;\r
- }\r
- }\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.3 MSRs in the Intel(R) Atom(TM) Processor Family\r
- //\r
- // If CPU Family/Model is 06_1CH, 06_26H, 06_27H, 06_35H or 06_36H, then\r
- // SMRR Physical Base and SMM Physical Mask MSRs are not available.\r
- //\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x1C || ModelId == 0x26 || ModelId == 0x27 || ModelId == 0x35 || ModelId == 0x36) {\r
- mSmrrSupported = FALSE;\r
- }\r
- }\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family\r
- //\r
- // If CPU Family/Model is 06_0F or 06_17, then use Intel(R) Core(TM) 2\r
- // Processor Family MSRs\r
//\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x17 || ModelId == 0x0f) {\r
- mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE;\r
- mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK;\r
- }\r
- }\r
-\r
+ // No need to program SMRRs on our virtual platform.\r
//\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 34.4.2 SMRAM Caching\r
- // An IA-32 processor does not automatically write back and invalidate its\r
- // caches before entering SMM or before exiting SMM. Because of this behavior,\r
- // care must be taken in the placement of the SMRAM in system memory and in\r
- // the caching of the SMRAM to prevent cache incoherence when switching back\r
- // and forth between SMM and protected mode operation.\r
- //\r
- // An IA-32 processor is a processor that does not support the Intel 64\r
- // Architecture. Support for the Intel 64 Architecture can be detected from\r
- // CPUID(CPUID_EXTENDED_CPU_SIG).EDX[29]\r
- //\r
- // If an IA-32 processor is detected, then set mNeedConfigureMtrrs to TRUE,\r
- // so caches are flushed on SMI entry and SMI exit, the interrupted code\r
- // MTRRs are saved/restored, and MTRRs for SMM are loaded.\r
- //\r
- AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
- if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
- AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
- if ((RegEdx & BIT29) != 0) {\r
- mNeedConfigureMtrrs = FALSE;\r
- }\r
- }\r
-\r
- //\r
- // Allocate array for state of SMRR enable on all CPUs\r
- //\r
- mSmrrEnabled = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
- ASSERT (mSmrrEnabled != NULL);\r
-\r
return EFI_SUCCESS;\r
}\r
\r
IN CPU_HOT_PLUG_DATA *CpuHotPlugData\r
)\r
{\r
- SMRAM_SAVE_STATE_MAP *CpuState;\r
- UINT64 FeatureControl;\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINTN FamilyId;\r
- UINTN ModelId;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuState;\r
\r
//\r
// Configure SMBASE.\r
//\r
- CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
- CpuState->x86.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family\r
- //\r
- // If Intel(R) Core(TM) Core(TM) 2 Processor Family MSRs are being used, then\r
- // make sure SMRR Enable(BIT3) of MSR_FEATURE_CONTROL MSR(0x3A) is set before\r
- // accessing SMRR base/mask MSRs. If Lock(BIT0) of MSR_FEATURE_CONTROL MSR(0x3A)\r
- // is set, then the MSR is locked and can not be modified.\r
- //\r
- if (mSmrrSupported && mSmrrPhysBaseMsr == SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE) {\r
- FeatureControl = AsmReadMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL);\r
- if ((FeatureControl & BIT3) == 0) {\r
- if ((FeatureControl & BIT0) == 0) {\r
- AsmWriteMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL, FeatureControl | BIT3);\r
- } else {\r
- mSmrrSupported = FALSE;\r
- }\r
- }\r
- }\r
-\r
- //\r
- // If SMRR is supported, then program SMRR base/mask MSRs.\r
- // The EFI_MSR_SMRR_PHYS_MASK_VALID bit is not set until the first normal SMI.\r
- // The code that initializes SMM environment is running in normal mode\r
- // from SMRAM region. If SMRR is enabled here, then the SMRAM region\r
- // is protected and the normal mode code execution will fail.\r
- //\r
- if (mSmrrSupported) {\r
- AsmWriteMsr64 (mSmrrPhysBaseMsr, CpuHotPlugData->SmrrBase | MTRR_CACHE_WRITE_BACK);\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, (~(CpuHotPlugData->SmrrSize - 1) & EFI_MSR_SMRR_MASK));\r
- mSmrrEnabled[CpuIndex] = FALSE;\r
- }\r
-\r
- //\r
- // Retrieve CPU Family and Model\r
- //\r
- AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
- FamilyId = (RegEax >> 8) & 0xf;\r
- ModelId = (RegEax >> 4) & 0xf;\r
- if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
- ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
+ CpuState = (QEMU_SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
+ if ((CpuState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ CpuState->x86.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
+ } else {\r
+ CpuState->x64.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
}\r
\r
//\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.10.1 MSRs in 4th Generation Intel(R) Core(TM)\r
- // Processor Family.\r
+ // No need to program SMRRs on our virtual platform.\r
//\r
- // If CPU Family/Model is 06_3C, 06_45, or 06_46 then use 4th Generation\r
- // Intel(R) Core(TM) Processor Family MSRs.\r
- //\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x3C || ModelId == 0x45 || ModelId == 0x46) {\r
- //\r
- // Check to see if the CPU supports the SMM Code Access Check feature\r
- // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
- //\r
- if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) != 0) {\r
- mSmmFeatureControlSupported = TRUE;\r
- }\r
- }\r
- }\r
}\r
\r
/**\r
IN UINT64 NewInstructionPointer\r
)\r
{\r
- return 0;\r
+ UINT64 OriginalInstructionPointer;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)CpuState;\r
+\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ OriginalInstructionPointer = (UINT64)CpuSaveState->x86._EIP;\r
+ CpuSaveState->x86._EIP = (UINT32)NewInstructionPointer;\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuSaveState->x86.AutoHALTRestart & BIT0) != 0) {\r
+ CpuSaveState->x86.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ } else {\r
+ OriginalInstructionPointer = CpuSaveState->x64._RIP;\r
+ if ((CpuSaveState->x64.IA32_EFER & LMA) == 0) {\r
+ CpuSaveState->x64._RIP = (UINT32)NewInstructionPointer32;\r
+ } else {\r
+ CpuSaveState->x64._RIP = (UINT32)NewInstructionPointer;\r
+ }\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuSaveState->x64.AutoHALTRestart & BIT0) != 0) {\r
+ CpuSaveState->x64.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ }\r
+ return OriginalInstructionPointer;\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- return mNeedConfigureMtrrs;\r
+ return FALSE;\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- if (mSmrrSupported && mNeedConfigureMtrrs) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) & ~EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- }\r
+ //\r
+ // No SMRR support, nothing to do\r
+ //\r
}\r
\r
/**\r
VOID\r
)\r
{\r
- if (mSmrrSupported && mNeedConfigureMtrrs) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- }\r
+ //\r
+ // No SMRR support, nothing to do\r
+ //\r
}\r
\r
/**\r
)\r
{\r
//\r
- // If SMRR is supported and this is the first normal SMI, then enable SMRR\r
+ // No SMRR support, nothing to do\r
//\r
- if (mSmrrSupported && !mSmrrEnabled[CpuIndex]) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64 (mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- mSmrrEnabled[CpuIndex] = TRUE;\r
- }\r
}\r
\r
/**\r
IN SMM_REG_NAME RegName\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- return TRUE;\r
- }\r
+ ASSERT (RegName == SmmRegFeatureControl);\r
return FALSE;\r
}\r
\r
IN SMM_REG_NAME RegName\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- return AsmReadMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL);\r
- }\r
+ //\r
+ // This is called for SmmRegSmmDelayed, SmmRegSmmBlocked, SmmRegSmmEnable.\r
+ // The last of these should actually be SmmRegSmmDisable, so we can just\r
+ // return 0 (i.e. FALSE) for all of them.\r
+ //\r
return 0;\r
}\r
\r
IN UINT64 Value\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- AsmWriteMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL, Value);\r
+ ASSERT (FALSE);\r
+}\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+#define SMM_CPU_OFFSET(Field) OFFSET_OF (QEMU_SMRAM_SAVE_STATE_MAP, Field)\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_REGISTER_RANGE\r
+///\r
+#define SMM_REGISTER_RANGE(Start, End) { Start, End, End - Start + 1 }\r
+\r
+///\r
+/// Structure used to describe a range of registers\r
+///\r
+typedef struct {\r
+ EFI_SMM_SAVE_STATE_REGISTER Start;\r
+ EFI_SMM_SAVE_STATE_REGISTER End;\r
+ UINTN Length;\r
+} CPU_SMM_SAVE_STATE_REGISTER_RANGE;\r
+\r
+///\r
+/// Structure used to build a lookup table to retrieve the widths and offsets\r
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value\r
+///\r
+\r
+#define SMM_SAVE_STATE_REGISTER_FIRST_INDEX 1\r
+\r
+typedef struct {\r
+ UINT8 Width32;\r
+ UINT8 Width64;\r
+ UINT16 Offset32;\r
+ UINT16 Offset64Lo;\r
+ UINT16 Offset64Hi;\r
+ BOOLEAN Writeable;\r
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;\r
+\r
+///\r
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER \r
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+static CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE mSmmCpuRegisterRanges[] = {\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_GDTBASE, EFI_SMM_SAVE_STATE_REGISTER_LDTINFO),\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_ES, EFI_SMM_SAVE_STATE_REGISTER_RIP),\r
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_RFLAGS, EFI_SMM_SAVE_STATE_REGISTER_CR4),\r
+ { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }\r
+};\r
+\r
+///\r
+/// Lookup table used to retrieve the widths and offsets associated with each \r
+/// supported EFI_SMM_SAVE_STATE_REGISTER value \r
+///\r
+static CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY mSmmCpuWidthOffset[] = {\r
+ {0, 0, 0, 0, 0, FALSE}, // Reserved\r
+\r
+ //\r
+ // CPU Save State registers defined in PI SMM CPU Protocol.\r
+ //\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._GDTRBase) , SMM_CPU_OFFSET (x64._GDTRBase) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE = 4\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._IDTRBase) , SMM_CPU_OFFSET (x64._IDTRBase) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE = 5\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._LDTRBase) , SMM_CPU_OFFSET (x64._LDTRBase) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE = 6\r
+ {0, 0, 0 , SMM_CPU_OFFSET (x64._GDTRLimit), SMM_CPU_OFFSET (x64._GDTRLimit) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7\r
+ {0, 0, 0 , SMM_CPU_OFFSET (x64._IDTRLimit), SMM_CPU_OFFSET (x64._IDTRLimit) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8\r
+ {0, 0, 0 , SMM_CPU_OFFSET (x64._LDTRLimit), SMM_CPU_OFFSET (x64._LDTRLimit) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9\r
+ {0, 0, 0 , 0 , 0 + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO = 10\r
+\r
+ {4, 4, SMM_CPU_OFFSET (x86._ES) , SMM_CPU_OFFSET (x64._ES) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_ES = 20\r
+ {4, 4, SMM_CPU_OFFSET (x86._CS) , SMM_CPU_OFFSET (x64._CS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CS = 21\r
+ {4, 4, SMM_CPU_OFFSET (x86._SS) , SMM_CPU_OFFSET (x64._SS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_SS = 22\r
+ {4, 4, SMM_CPU_OFFSET (x86._DS) , SMM_CPU_OFFSET (x64._DS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DS = 23\r
+ {4, 4, SMM_CPU_OFFSET (x86._FS) , SMM_CPU_OFFSET (x64._FS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_FS = 24\r
+ {4, 4, SMM_CPU_OFFSET (x86._GS) , SMM_CPU_OFFSET (x64._GS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GS = 25\r
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._LDTR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26\r
+ {4, 4, SMM_CPU_OFFSET (x86._TR) , SMM_CPU_OFFSET (x64._TR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL = 27\r
+ {4, 8, SMM_CPU_OFFSET (x86._DR7) , SMM_CPU_OFFSET (x64._DR7) , SMM_CPU_OFFSET (x64._DR7) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR7 = 28\r
+ {4, 8, SMM_CPU_OFFSET (x86._DR6) , SMM_CPU_OFFSET (x64._DR6) , SMM_CPU_OFFSET (x64._DR6) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR6 = 29\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R8) , SMM_CPU_OFFSET (x64._R8) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R8 = 30\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R9) , SMM_CPU_OFFSET (x64._R9) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R9 = 31\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R10) , SMM_CPU_OFFSET (x64._R10) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R10 = 32\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R11) , SMM_CPU_OFFSET (x64._R11) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R11 = 33\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R12) , SMM_CPU_OFFSET (x64._R12) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R12 = 34\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R13) , SMM_CPU_OFFSET (x64._R13) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R13 = 35\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R14) , SMM_CPU_OFFSET (x64._R14) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R14 = 36\r
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R15) , SMM_CPU_OFFSET (x64._R15) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R15 = 37\r
+ {4, 8, SMM_CPU_OFFSET (x86._EAX) , SMM_CPU_OFFSET (x64._RAX) , SMM_CPU_OFFSET (x64._RAX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RAX = 38\r
+ {4, 8, SMM_CPU_OFFSET (x86._EBX) , SMM_CPU_OFFSET (x64._RBX) , SMM_CPU_OFFSET (x64._RBX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBX = 39\r
+ {4, 8, SMM_CPU_OFFSET (x86._ECX) , SMM_CPU_OFFSET (x64._RCX) , SMM_CPU_OFFSET (x64._RCX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RCX = 40\r
+ {4, 8, SMM_CPU_OFFSET (x86._EDX) , SMM_CPU_OFFSET (x64._RDX) , SMM_CPU_OFFSET (x64._RDX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDX = 41\r
+ {4, 8, SMM_CPU_OFFSET (x86._ESP) , SMM_CPU_OFFSET (x64._RSP) , SMM_CPU_OFFSET (x64._RSP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSP = 42\r
+ {4, 8, SMM_CPU_OFFSET (x86._EBP) , SMM_CPU_OFFSET (x64._RBP) , SMM_CPU_OFFSET (x64._RBP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBP = 43\r
+ {4, 8, SMM_CPU_OFFSET (x86._ESI) , SMM_CPU_OFFSET (x64._RSI) , SMM_CPU_OFFSET (x64._RSI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSI = 44\r
+ {4, 8, SMM_CPU_OFFSET (x86._EDI) , SMM_CPU_OFFSET (x64._RDI) , SMM_CPU_OFFSET (x64._RDI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDI = 45\r
+ {4, 8, SMM_CPU_OFFSET (x86._EIP) , SMM_CPU_OFFSET (x64._RIP) , SMM_CPU_OFFSET (x64._RIP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RIP = 46\r
+\r
+ {4, 8, SMM_CPU_OFFSET (x86._EFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51\r
+ {4, 8, SMM_CPU_OFFSET (x86._CR0) , SMM_CPU_OFFSET (x64._CR0) , SMM_CPU_OFFSET (x64._CR0) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR0 = 52\r
+ {4, 8, SMM_CPU_OFFSET (x86._CR3) , SMM_CPU_OFFSET (x64._CR3) , SMM_CPU_OFFSET (x64._CR3) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR3 = 53\r
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._CR4) , SMM_CPU_OFFSET (x64._CR4) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR4 = 54\r
+};\r
+\r
+//\r
+// No support for I/O restart\r
+//\r
+\r
+/**\r
+ Convert an EFI_SMM_SAVE_STATE_REGISTER enum value to an index into the\r
+ mSmmCpuWidthOffset[] lookup table.\r
+\r
+ @param Register Specifies the CPU register to read from the save state.\r
+\r
+ @retval 0 Register is not valid\r
+ @retval >0 Index into mSmmCpuWidthOffset[] associated with Register\r
+\r
+**/\r
+static UINTN\r
+GetRegisterIndex (\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN Offset;\r
+\r
+ for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_FIRST_INDEX; mSmmCpuRegisterRanges[Index].Length != 0; Index++) {\r
+ if (Register >= mSmmCpuRegisterRanges[Index].Start && Register <= mSmmCpuRegisterRanges[Index].End) {\r
+ return Register - mSmmCpuRegisterRanges[Index].Start + Offset;\r
+ }\r
+ Offset += mSmmCpuRegisterRanges[Index].Length;\r
+ }\r
+ return 0;\r
+}\r
+\r
+/**\r
+ Read a CPU Save State register on the target processor.\r
+\r
+ This function abstracts whether the CPU Save State register is in the\r
+ IA32 CPU Save State Map or the X64 CPU Save State Map.\r
+\r
+ This function supports reading a CPU Save State register in SMBase relocation handler.\r
+\r
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.\r
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.\r
+ @param[in] Width The number of bytes to read from the CPU save state.\r
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.\r
+\r
+ @retval EFI_SUCCESS The register was read from Save State.\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.\r
+ @retval EFI_INVALID_PARAMETER Width is larger than the size of the register.\r
+\r
+**/\r
+static EFI_STATUS\r
+ReadSaveStateRegisterByIndex (\r
+ IN UINTN CpuIndex,\r
+ IN UINTN RegisterIndex,\r
+ IN UINTN Width,\r
+ OUT VOID *Buffer\r
+ )\r
+{\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write return buffer\r
+ //\r
+ ASSERT(CpuSaveState != NULL);\r
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Width);\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of return buffer\r
+ //\r
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, MIN(4, Width));\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of return buffer\r
+ //\r
+ CopyMem((UINT8 *)Buffer + 4, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, Width - 4);\r
+ }\r
}\r
+ return EFI_SUCCESS;\r
}\r
\r
/**\r
OUT VOID *Buffer\r
)\r
{\r
- return EFI_UNSUPPORTED;\r
+ UINTN RegisterIndex;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ //\r
+ // Only byte access is supported for this register\r
+ //\r
+ if (Width != 1) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ //\r
+ // Check CPU mode\r
+ //\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ *(UINT8 *)Buffer = 32;\r
+ } else {\r
+ *(UINT8 *)Buffer = 64;\r
+ }\r
+\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index. Let\r
+ // PiSmmCpuDxeSmm implement other special registers (currently\r
+ // there is only EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID).\r
+ //\r
+ RegisterIndex = GetRegisterIndex (Register);\r
+ if (RegisterIndex == 0) {\r
+ return Register < EFI_SMM_SAVE_STATE_REGISTER_IO ? EFI_NOT_FOUND : EFI_UNSUPPORTED;\r
+ }\r
+\r
+ return ReadSaveStateRegisterByIndex (CpuIndex, RegisterIndex, Width, Buffer);\r
}\r
\r
/**\r
IN CONST VOID *Buffer\r
)\r
{\r
- return EFI_UNSUPPORTED;\r
+ UINTN RegisterIndex;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index. Let\r
+ // PiSmmCpuDxeSmm implement other special registers (currently\r
+ // there is only EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID).\r
+ //\r
+ RegisterIndex = GetRegisterIndex (Register);\r
+ if (RegisterIndex == 0) {\r
+ return Register < EFI_SMM_SAVE_STATE_REGISTER_IO ? EFI_NOT_FOUND : EFI_UNSUPPORTED;\r
+ }\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ //\r
+ // Do not write non-writable SaveState, because it will cause exception.\r
+ // \r
+ if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {\r
+ return EFI_UNSUPPORTED;\r
+ }\r
+\r
+ //\r
+ // Check CPU mode\r
+ //\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+ //\r
+ // Write SMM State register\r
+ //\r
+ ASSERT (CpuSaveState != NULL);\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Buffer, Width);\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of SMM State register\r
+ //\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, Buffer, MIN (4, Width));\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of SMM State register\r
+ //\r
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, (UINT8 *)Buffer + 4, Width - 4);\r
+ }\r
+ }\r
+ return EFI_SUCCESS;\r
}\r
\r
/**\r