/** @file\r
-The CPU specific programming for PiSmmCpuDxeSmm module.\r
+ The CPU specific programming for PiSmmCpuDxeSmm module.\r
\r
-Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+ Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+ This program and the accompanying materials are licensed and made available\r
+ under the terms and conditions of the BSD License which accompanies this\r
+ distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT\r
+ WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
**/\r
\r
-#include <PiSmm.h>\r
-#include <Library/SmmCpuFeaturesLib.h>\r
#include <Library/BaseLib.h>\r
-#include <Library/MtrrLib.h>\r
-#include <Library/PcdLib.h>\r
-#include <Library/MemoryAllocationLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
#include <Library/DebugLib.h>\r
-#include <Register/Cpuid.h>\r
-#include <Register/SmramSaveStateMap.h>\r
-\r
-//\r
-// Machine Specific Registers (MSRs)\r
-//\r
-#define SMM_FEATURES_LIB_IA32_MTRR_CAP 0x0FE\r
-#define SMM_FEATURES_LIB_IA32_FEATURE_CONTROL 0x03A\r
-#define SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE 0x1F2\r
-#define SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK 0x1F3\r
-#define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE 0x0A0\r
-#define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK 0x0A1\r
-#define EFI_MSR_SMRR_MASK 0xFFFFF000\r
-#define EFI_MSR_SMRR_PHYS_MASK_VALID BIT11\r
-#define SMM_FEATURES_LIB_SMM_FEATURE_CONTROL 0x4E0\r
-\r
-//\r
-// MSRs required for configuration of SMM Code Access Check\r
-//\r
-#define SMM_FEATURES_LIB_IA32_MCA_CAP 0x17D\r
-#define SMM_CODE_ACCESS_CHK_BIT BIT58\r
-\r
-//\r
-// Set default value to assume SMRR is not supported\r
-//\r
-BOOLEAN mSmrrSupported = FALSE;\r
-\r
-//\r
-// Set default value to assume MSR_SMM_FEATURE_CONTROL is not supported\r
-//\r
-BOOLEAN mSmmFeatureControlSupported = FALSE;\r
-\r
-//\r
-// Set default value to assume IA-32 Architectural MSRs are used\r
-//\r
-UINT32 mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE;\r
-UINT32 mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK;\r
-\r
-//\r
-// Set default value to assume MTRRs need to be configured on each SMI\r
-//\r
-BOOLEAN mNeedConfigureMtrrs = TRUE;\r
+#include <Library/MemEncryptSevLib.h>\r
+#include <Library/SmmCpuFeaturesLib.h>\r
+#include <Library/SmmServicesTableLib.h>\r
+#include <Library/UefiBootServicesTableLib.h>\r
+#include <PiSmm.h>\r
+#include <Register/QemuSmramSaveStateMap.h>\r
\r
//\r
-// Array for state of SMRR enable on all CPUs\r
+// EFER register LMA bit\r
//\r
-BOOLEAN *mSmrrEnabled;\r
+#define LMA BIT10\r
\r
/**\r
The constructor function\r
IN EFI_SYSTEM_TABLE *SystemTable\r
)\r
{\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINTN FamilyId;\r
- UINTN ModelId;\r
-\r
- //\r
- // Retrieve CPU Family and Model\r
- //\r
- AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
- FamilyId = (RegEax >> 8) & 0xf;\r
- ModelId = (RegEax >> 4) & 0xf;\r
- if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
- ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
- }\r
-\r
- //\r
- // Check CPUID(CPUID_VERSION_INFO).EDX[12] for MTRR capability\r
- //\r
- if ((RegEdx & BIT12) != 0) {\r
- //\r
- // Check MTRR_CAP MSR bit 11 for SMRR support\r
- //\r
- if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MTRR_CAP) & BIT11) != 0) {\r
- mSmrrSupported = TRUE;\r
- }\r
- }\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.3 MSRs in the Intel(R) Atom(TM) Processor Family\r
//\r
- // If CPU Family/Model is 06_1CH, 06_26H, 06_27H, 06_35H or 06_36H, then\r
- // SMRR Physical Base and SMM Physical Mask MSRs are not available.\r
+ // No need to program SMRRs on our virtual platform.\r
//\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x1C || ModelId == 0x26 || ModelId == 0x27 || ModelId == 0x35 || ModelId == 0x36) {\r
- mSmrrSupported = FALSE;\r
- }\r
- }\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family\r
- //\r
- // If CPU Family/Model is 06_0F or 06_17, then use Intel(R) Core(TM) 2\r
- // Processor Family MSRs\r
- //\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x17 || ModelId == 0x0f) {\r
- mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE;\r
- mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK;\r
- }\r
- }\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 34.4.2 SMRAM Caching\r
- // An IA-32 processor does not automatically write back and invalidate its\r
- // caches before entering SMM or before exiting SMM. Because of this behavior,\r
- // care must be taken in the placement of the SMRAM in system memory and in\r
- // the caching of the SMRAM to prevent cache incoherence when switching back\r
- // and forth between SMM and protected mode operation.\r
- //\r
- // An IA-32 processor is a processor that does not support the Intel 64\r
- // Architecture. Support for the Intel 64 Architecture can be detected from\r
- // CPUID(CPUID_EXTENDED_CPU_SIG).EDX[29]\r
- //\r
- // If an IA-32 processor is detected, then set mNeedConfigureMtrrs to TRUE,\r
- // so caches are flushed on SMI entry and SMI exit, the interrupted code\r
- // MTRRs are saved/restored, and MTRRs for SMM are loaded.\r
- //\r
- AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
- if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
- AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
- if ((RegEdx & BIT29) != 0) {\r
- mNeedConfigureMtrrs = FALSE;\r
- }\r
- }\r
-\r
- //\r
- // Allocate array for state of SMRR enable on all CPUs\r
- //\r
- mSmrrEnabled = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
- ASSERT (mSmrrEnabled != NULL);\r
-\r
return EFI_SUCCESS;\r
}\r
\r
IN CPU_HOT_PLUG_DATA *CpuHotPlugData\r
)\r
{\r
- SMRAM_SAVE_STATE_MAP *CpuState;\r
- UINT64 FeatureControl;\r
- UINT32 RegEax;\r
- UINT32 RegEdx;\r
- UINTN FamilyId;\r
- UINTN ModelId;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuState;\r
\r
//\r
// Configure SMBASE.\r
//\r
- CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
- CpuState->x86.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
-\r
- //\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family\r
- //\r
- // If Intel(R) Core(TM) Core(TM) 2 Processor Family MSRs are being used, then\r
- // make sure SMRR Enable(BIT3) of MSR_FEATURE_CONTROL MSR(0x3A) is set before\r
- // accessing SMRR base/mask MSRs. If Lock(BIT0) of MSR_FEATURE_CONTROL MSR(0x3A)\r
- // is set, then the MSR is locked and can not be modified.\r
- //\r
- if (mSmrrSupported && mSmrrPhysBaseMsr == SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE) {\r
- FeatureControl = AsmReadMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL);\r
- if ((FeatureControl & BIT3) == 0) {\r
- if ((FeatureControl & BIT0) == 0) {\r
- AsmWriteMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL, FeatureControl | BIT3);\r
- } else {\r
- mSmrrSupported = FALSE;\r
- }\r
- }\r
- }\r
-\r
- //\r
- // If SMRR is supported, then program SMRR base/mask MSRs.\r
- // The EFI_MSR_SMRR_PHYS_MASK_VALID bit is not set until the first normal SMI.\r
- // The code that initializes SMM environment is running in normal mode\r
- // from SMRAM region. If SMRR is enabled here, then the SMRAM region\r
- // is protected and the normal mode code execution will fail.\r
- //\r
- if (mSmrrSupported) {\r
- AsmWriteMsr64 (mSmrrPhysBaseMsr, CpuHotPlugData->SmrrBase | MTRR_CACHE_WRITE_BACK);\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, (~(CpuHotPlugData->SmrrSize - 1) & EFI_MSR_SMRR_MASK));\r
- mSmrrEnabled[CpuIndex] = FALSE;\r
+ CpuState = (QEMU_SMRAM_SAVE_STATE_MAP *)(UINTN)(\r
+ SMM_DEFAULT_SMBASE +\r
+ SMRAM_SAVE_STATE_MAP_OFFSET\r
+ );\r
+ if ((CpuState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ CpuState->x86.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
+ } else {\r
+ CpuState->x64.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];\r
}\r
\r
//\r
- // Retrieve CPU Family and Model\r
- //\r
- AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
- FamilyId = (RegEax >> 8) & 0xf;\r
- ModelId = (RegEax >> 4) & 0xf;\r
- if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
- ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
- }\r
-\r
+ // No need to program SMRRs on our virtual platform.\r
//\r
- // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
- // Volume 3C, Section 35.10.1 MSRs in 4th Generation Intel(R) Core(TM)\r
- // Processor Family.\r
- //\r
- // If CPU Family/Model is 06_3C, 06_45, or 06_46 then use 4th Generation\r
- // Intel(R) Core(TM) Processor Family MSRs.\r
- //\r
- if (FamilyId == 0x06) {\r
- if (ModelId == 0x3C || ModelId == 0x45 || ModelId == 0x46) {\r
- //\r
- // Check to see if the CPU supports the SMM Code Access Check feature\r
- // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
- //\r
- if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) != 0) {\r
- mSmmFeatureControlSupported = TRUE;\r
- }\r
- }\r
- }\r
}\r
\r
/**\r
\r
@param[in] CpuIndex The index of the CPU to hook. The value\r
must be between 0 and the NumberOfCpus\r
- field in the System Management System Table\r
- (SMST).\r
+ field in the System Management System\r
+ Table (SMST).\r
@param[in] CpuState Pointer to SMRAM Save State Map for the\r
currently executing CPU.\r
@param[in] NewInstructionPointer32 Instruction pointer to use if resuming to\r
IN UINT64 NewInstructionPointer\r
)\r
{\r
- return 0;\r
+ UINT64 OriginalInstructionPointer;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)CpuState;\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ OriginalInstructionPointer = (UINT64)CpuSaveState->x86._EIP;\r
+ CpuSaveState->x86._EIP = (UINT32)NewInstructionPointer;\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuSaveState->x86.AutoHALTRestart & BIT0) != 0) {\r
+ CpuSaveState->x86.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ } else {\r
+ OriginalInstructionPointer = CpuSaveState->x64._RIP;\r
+ if ((CpuSaveState->x64.IA32_EFER & LMA) == 0) {\r
+ CpuSaveState->x64._RIP = (UINT32)NewInstructionPointer32;\r
+ } else {\r
+ CpuSaveState->x64._RIP = (UINT32)NewInstructionPointer;\r
+ }\r
+ //\r
+ // Clear the auto HALT restart flag so the RSM instruction returns\r
+ // program control to the instruction following the HLT instruction.\r
+ //\r
+ if ((CpuSaveState->x64.AutoHALTRestart & BIT0) != 0) {\r
+ CpuSaveState->x64.AutoHALTRestart &= ~BIT0;\r
+ }\r
+ }\r
+ return OriginalInstructionPointer;\r
}\r
\r
/**\r
VOID\r
)\r
{\r
+ EFI_STATUS Status;\r
+ UINTN MapPagesBase;\r
+ UINTN MapPagesCount;\r
+\r
+ if (!MemEncryptSevIsEnabled ()) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Now that SMBASE relocation is complete, re-encrypt the original SMRAM save\r
+ // state map's container pages, and release the pages to DXE. (The pages were\r
+ // allocated in PlatformPei.)\r
+ //\r
+ Status = MemEncryptSevLocateInitialSmramSaveStateMapPages (\r
+ &MapPagesBase,\r
+ &MapPagesCount\r
+ );\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ Status = MemEncryptSevSetPageEncMask (\r
+ 0, // Cr3BaseAddress -- use current CR3\r
+ MapPagesBase, // BaseAddress\r
+ MapPagesCount, // NumPages\r
+ TRUE // Flush\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ DEBUG ((DEBUG_ERROR, "%a: MemEncryptSevSetPageEncMask(): %r\n",\r
+ __FUNCTION__, Status));\r
+ ASSERT (FALSE);\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ ZeroMem ((VOID *)MapPagesBase, EFI_PAGES_TO_SIZE (MapPagesCount));\r
+\r
+ Status = gBS->FreePages (MapPagesBase, MapPagesCount);\r
+ ASSERT_EFI_ERROR (Status);\r
}\r
\r
/**\r
and the default SMI handler must be used.\r
\r
@retval 0 Use the default SMI handler.\r
- @retval > 0 Use the SMI handler installed by SmmCpuFeaturesInstallSmiHandler()\r
- The caller is required to allocate enough SMRAM for each CPU to\r
- support the size of the custom SMI handler.\r
+ @retval > 0 Use the SMI handler installed by\r
+ SmmCpuFeaturesInstallSmiHandler(). The caller is required to\r
+ allocate enough SMRAM for each CPU to support the size of the\r
+ custom SMI handler.\r
**/\r
UINTN\r
EFIAPI\r
}\r
\r
/**\r
- Install a custom SMI handler for the CPU specified by CpuIndex. This function\r
- is only called if SmmCpuFeaturesGetSmiHandlerSize() returns a size is greater\r
- than zero and is called by the CPU that was elected as monarch during System\r
- Management Mode initialization.\r
+ Install a custom SMI handler for the CPU specified by CpuIndex. This\r
+ function is only called if SmmCpuFeaturesGetSmiHandlerSize() returns a size\r
+ is greater than zero and is called by the CPU that was elected as monarch\r
+ during System Management Mode initialization.\r
\r
@param[in] CpuIndex The index of the CPU to install the custom SMI handler.\r
The value must be between 0 and the NumberOfCpus field\r
VOID\r
)\r
{\r
- return mNeedConfigureMtrrs;\r
+ return FALSE;\r
}\r
\r
/**\r
- Disable SMRR register if SMRR is supported and SmmCpuFeaturesNeedConfigureMtrrs()\r
- returns TRUE.\r
+ Disable SMRR register if SMRR is supported and\r
+ SmmCpuFeaturesNeedConfigureMtrrs() returns TRUE.\r
**/\r
VOID\r
EFIAPI\r
VOID\r
)\r
{\r
- if (mSmrrSupported && mNeedConfigureMtrrs) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) & ~EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- }\r
+ //\r
+ // No SMRR support, nothing to do\r
+ //\r
}\r
\r
/**\r
- Enable SMRR register if SMRR is supported and SmmCpuFeaturesNeedConfigureMtrrs()\r
- returns TRUE.\r
+ Enable SMRR register if SMRR is supported and\r
+ SmmCpuFeaturesNeedConfigureMtrrs() returns TRUE.\r
**/\r
VOID\r
EFIAPI\r
VOID\r
)\r
{\r
- if (mSmrrSupported && mNeedConfigureMtrrs) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- }\r
+ //\r
+ // No SMRR support, nothing to do\r
+ //\r
}\r
\r
/**\r
)\r
{\r
//\r
- // If SMRR is supported and this is the first normal SMI, then enable SMRR\r
+ // No SMRR support, nothing to do\r
//\r
- if (mSmrrSupported && !mSmrrEnabled[CpuIndex]) {\r
- AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64 (mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);\r
- mSmrrEnabled[CpuIndex] = TRUE;\r
- }\r
}\r
\r
/**\r
Processor specific hook point each time a CPU exits System Management Mode.\r
\r
- @param[in] CpuIndex The index of the CPU that is exiting SMM. The value must\r
- be between 0 and the NumberOfCpus field in the System\r
- Management System Table (SMST).\r
+ @param[in] CpuIndex The index of the CPU that is exiting SMM. The value\r
+ must be between 0 and the NumberOfCpus field in the\r
+ System Management System Table (SMST).\r
**/\r
VOID\r
EFIAPI\r
IN SMM_REG_NAME RegName\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- return TRUE;\r
- }\r
+ ASSERT (RegName == SmmRegFeatureControl);\r
return FALSE;\r
}\r
\r
IN SMM_REG_NAME RegName\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- return AsmReadMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL);\r
- }\r
+ //\r
+ // This is called for SmmRegSmmDelayed, SmmRegSmmBlocked, SmmRegSmmEnable.\r
+ // The last of these should actually be SmmRegSmmDisable, so we can just\r
+ // return 0 (i.e., report the register as all-bits-clear).\r
+ //\r
return 0;\r
}\r
\r
IN UINT64 Value\r
)\r
{\r
- if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {\r
- AsmWriteMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL, Value);\r
+ ASSERT (FALSE);\r
+}\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type\r
+/// CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+#define SMM_CPU_OFFSET(Field) OFFSET_OF (QEMU_SMRAM_SAVE_STATE_MAP, Field)\r
+\r
+///\r
+/// Macro used to simplify the lookup table entries of type\r
+/// CPU_SMM_SAVE_STATE_REGISTER_RANGE\r
+///\r
+#define SMM_REGISTER_RANGE(Start, End) { Start, End, End - Start + 1 }\r
+\r
+///\r
+/// Structure used to describe a range of registers\r
+///\r
+typedef struct {\r
+ EFI_SMM_SAVE_STATE_REGISTER Start;\r
+ EFI_SMM_SAVE_STATE_REGISTER End;\r
+ UINTN Length;\r
+} CPU_SMM_SAVE_STATE_REGISTER_RANGE;\r
+\r
+///\r
+/// Structure used to build a lookup table to retrieve the widths and offsets\r
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value\r
+///\r
+\r
+#define SMM_SAVE_STATE_REGISTER_FIRST_INDEX 1\r
+\r
+typedef struct {\r
+ UINT8 Width32;\r
+ UINT8 Width64;\r
+ UINT16 Offset32;\r
+ UINT16 Offset64Lo;\r
+ UINT16 Offset64Hi;\r
+ BOOLEAN Writeable;\r
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;\r
+\r
+///\r
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER\r
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY\r
+///\r
+STATIC CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE mSmmCpuRegisterRanges[] = {\r
+ SMM_REGISTER_RANGE (\r
+ EFI_SMM_SAVE_STATE_REGISTER_GDTBASE,\r
+ EFI_SMM_SAVE_STATE_REGISTER_LDTINFO\r
+ ),\r
+ SMM_REGISTER_RANGE (\r
+ EFI_SMM_SAVE_STATE_REGISTER_ES,\r
+ EFI_SMM_SAVE_STATE_REGISTER_RIP\r
+ ),\r
+ SMM_REGISTER_RANGE (\r
+ EFI_SMM_SAVE_STATE_REGISTER_RFLAGS,\r
+ EFI_SMM_SAVE_STATE_REGISTER_CR4\r
+ ),\r
+ { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }\r
+};\r
+\r
+///\r
+/// Lookup table used to retrieve the widths and offsets associated with each\r
+/// supported EFI_SMM_SAVE_STATE_REGISTER value\r
+///\r
+STATIC CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY mSmmCpuWidthOffset[] = {\r
+ {\r
+ 0, // Width32\r
+ 0, // Width64\r
+ 0, // Offset32\r
+ 0, // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // Reserved\r
+\r
+ //\r
+ // CPU Save State registers defined in PI SMM CPU Protocol.\r
+ //\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._GDTRBase), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._GDTRBase) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE = 4\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._IDTRBase), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._IDTRBase) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE = 5\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._LDTRBase), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._LDTRBase) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE = 6\r
+\r
+ {\r
+ 0, // Width32\r
+ 0, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._GDTRLimit), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._GDTRLimit) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7\r
+\r
+ {\r
+ 0, // Width32\r
+ 0, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._IDTRLimit), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._IDTRLimit) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8\r
+\r
+ {\r
+ 0, // Width32\r
+ 0, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._LDTRLimit), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._LDTRLimit) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9\r
+\r
+ {\r
+ 0, // Width32\r
+ 0, // Width64\r
+ 0, // Offset32\r
+ 0, // Offset64Lo\r
+ 0 + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO = 10\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._ES), // Offset32\r
+ SMM_CPU_OFFSET (x64._ES), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_ES = 20\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._CS), // Offset32\r
+ SMM_CPU_OFFSET (x64._CS), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_CS = 21\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._SS), // Offset32\r
+ SMM_CPU_OFFSET (x64._SS), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_SS = 22\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._DS), // Offset32\r
+ SMM_CPU_OFFSET (x64._DS), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_DS = 23\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._FS), // Offset32\r
+ SMM_CPU_OFFSET (x64._FS), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_FS = 24\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._GS), // Offset32\r
+ SMM_CPU_OFFSET (x64._GS), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_GS = 25\r
+\r
+ {\r
+ 0, // Width32\r
+ 4, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._LDTR), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26\r
+\r
+ {\r
+ 4, // Width32\r
+ 4, // Width64\r
+ SMM_CPU_OFFSET (x86._TR), // Offset32\r
+ SMM_CPU_OFFSET (x64._TR), // Offset64Lo\r
+ 0, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL = 27\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._DR7), // Offset32\r
+ SMM_CPU_OFFSET (x64._DR7), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._DR7) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_DR7 = 28\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._DR6), // Offset32\r
+ SMM_CPU_OFFSET (x64._DR6), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._DR6) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_DR6 = 29\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R8), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R8) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R8 = 30\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R9), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R9) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R9 = 31\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R10), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R10) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R10 = 32\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R11), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R11) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R11 = 33\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R12), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R12) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R12 = 34\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R13), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R13) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R13 = 35\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R14), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R14) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R14 = 36\r
+\r
+ {\r
+ 0, // Width32\r
+ 8, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._R15), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._R15) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_R15 = 37\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EAX), // Offset32\r
+ SMM_CPU_OFFSET (x64._RAX), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RAX) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RAX = 38\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EBX), // Offset32\r
+ SMM_CPU_OFFSET (x64._RBX), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RBX) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RBX = 39\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._ECX), // Offset32\r
+ SMM_CPU_OFFSET (x64._RCX), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RCX) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RCX = 40\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EDX), // Offset32\r
+ SMM_CPU_OFFSET (x64._RDX), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RDX) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RDX = 41\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._ESP), // Offset32\r
+ SMM_CPU_OFFSET (x64._RSP), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RSP) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RSP = 42\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EBP), // Offset32\r
+ SMM_CPU_OFFSET (x64._RBP), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RBP) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RBP = 43\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._ESI), // Offset32\r
+ SMM_CPU_OFFSET (x64._RSI), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RSI) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RSI = 44\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EDI), // Offset32\r
+ SMM_CPU_OFFSET (x64._RDI), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RDI) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RDI = 45\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EIP), // Offset32\r
+ SMM_CPU_OFFSET (x64._RIP), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RIP) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RIP = 46\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._EFLAGS), // Offset32\r
+ SMM_CPU_OFFSET (x64._RFLAGS), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._RFLAGS) + 4, // Offset64Hi\r
+ TRUE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._CR0), // Offset32\r
+ SMM_CPU_OFFSET (x64._CR0), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._CR0) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_CR0 = 52\r
+\r
+ {\r
+ 4, // Width32\r
+ 8, // Width64\r
+ SMM_CPU_OFFSET (x86._CR3), // Offset32\r
+ SMM_CPU_OFFSET (x64._CR3), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._CR3) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_CR3 = 53\r
+\r
+ {\r
+ 0, // Width32\r
+ 4, // Width64\r
+ 0, // Offset32\r
+ SMM_CPU_OFFSET (x64._CR4), // Offset64Lo\r
+ SMM_CPU_OFFSET (x64._CR4) + 4, // Offset64Hi\r
+ FALSE // Writeable\r
+ }, // EFI_SMM_SAVE_STATE_REGISTER_CR4 = 54\r
+};\r
+\r
+//\r
+// No support for I/O restart\r
+//\r
+\r
+/**\r
+ Convert an EFI_SMM_SAVE_STATE_REGISTER value to an index into mSmmCpuWidthOffset[].\r
+\r
+ @param Register Specifies the CPU register to read from the save state.\r
+\r
+ @retval 0 Register is not valid\r
+ @retval >0 Index into mSmmCpuWidthOffset[] associated with Register\r
+\r
+**/\r
+STATIC\r
+UINTN\r
+GetRegisterIndex (\r
+ IN EFI_SMM_SAVE_STATE_REGISTER Register\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN Offset;\r
+\r
+ for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_FIRST_INDEX;\r
+ mSmmCpuRegisterRanges[Index].Length != 0;\r
+ Index++) {\r
+ if (Register >= mSmmCpuRegisterRanges[Index].Start &&\r
+ Register <= mSmmCpuRegisterRanges[Index].End) {\r
+ return Register - mSmmCpuRegisterRanges[Index].Start + Offset;\r
+ }\r
+ Offset += mSmmCpuRegisterRanges[Index].Length;\r
+ }\r
+ return 0;\r
+}\r
+\r
+/**\r
+ Read a CPU Save State register on the target processor.\r
+\r
+ This function abstracts the differences that whether the CPU Save State\r
+ register is in the IA32 CPU Save State Map or X64 CPU Save State Map.\r
+\r
+ This function supports reading a CPU Save State register in SMBase relocation\r
+ handler.\r
+\r
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save\r
+ state.\r
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.\r
+ @param[in] Width The number of bytes to read from the CPU save\r
+ state.\r
+ @param[out] Buffer Upon return, this holds the CPU register value\r
+ read from the save state.\r
+\r
+ @retval EFI_SUCCESS The register was read from Save State.\r
+ @retval EFI_NOT_FOUND The register is not defined for the Save State\r
+ of Processor.\r
+ @retval EFI_INVALID_PARAMETER Width is larger than the width of the register.\r
+\r
+**/\r
+STATIC\r
+EFI_STATUS\r
+ReadSaveStateRegisterByIndex (\r
+ IN UINTN CpuIndex,\r
+ IN UINTN RegisterIndex,\r
+ IN UINTN Width,\r
+ OUT VOID *Buffer\r
+ )\r
+{\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be\r
+ // accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified\r
+ // register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write return buffer\r
+ //\r
+ ASSERT(CpuSaveState != NULL);\r
+ CopyMem (\r
+ Buffer,\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32,\r
+ Width\r
+ );\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be\r
+ // accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified\r
+ // register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of return buffer\r
+ //\r
+ CopyMem (\r
+ Buffer,\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo,\r
+ MIN (4, Width)\r
+ );\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of return buffer\r
+ //\r
+ CopyMem (\r
+ (UINT8 *)Buffer + 4,\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi,\r
+ Width - 4\r
+ );\r
+ }\r
}\r
+ return EFI_SUCCESS;\r
}\r
\r
/**\r
\r
@retval EFI_SUCCESS The register was read from Save State.\r
@retval EFI_INVALID_PARAMTER Buffer is NULL.\r
- @retval EFI_UNSUPPORTED This function does not support reading Register.\r
-\r
+ @retval EFI_UNSUPPORTED This function does not support reading\r
+ Register.\r
**/\r
EFI_STATUS\r
EFIAPI\r
OUT VOID *Buffer\r
)\r
{\r
- return EFI_UNSUPPORTED;\r
+ UINTN RegisterIndex;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ //\r
+ // Only byte access is supported for this register\r
+ //\r
+ if (Width != 1) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ //\r
+ // Check CPU mode\r
+ //\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ *(UINT8 *)Buffer = 32;\r
+ } else {\r
+ *(UINT8 *)Buffer = 64;\r
+ }\r
+\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index. Let\r
+ // PiSmmCpuDxeSmm implement other special registers (currently\r
+ // there is only EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID).\r
+ //\r
+ RegisterIndex = GetRegisterIndex (Register);\r
+ if (RegisterIndex == 0) {\r
+ return (Register < EFI_SMM_SAVE_STATE_REGISTER_IO ?\r
+ EFI_NOT_FOUND :\r
+ EFI_UNSUPPORTED);\r
+ }\r
+\r
+ return ReadSaveStateRegisterByIndex (CpuIndex, RegisterIndex, Width, Buffer);\r
}\r
\r
/**\r
\r
@retval EFI_SUCCESS The register was written to Save State.\r
@retval EFI_INVALID_PARAMTER Buffer is NULL.\r
- @retval EFI_UNSUPPORTED This function does not support writing Register.\r
+ @retval EFI_UNSUPPORTED This function does not support writing\r
+ Register.\r
**/\r
EFI_STATUS\r
EFIAPI\r
IN CONST VOID *Buffer\r
)\r
{\r
- return EFI_UNSUPPORTED;\r
+ UINTN RegisterIndex;\r
+ QEMU_SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {\r
+ return EFI_SUCCESS;\r
+ }\r
+\r
+ //\r
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported; report\r
+ // EFI_NOT_FOUND, matching the read path's handling of this register\r
+ //\r
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // Convert Register to a register lookup table index. Let\r
+ // PiSmmCpuDxeSmm implement other special registers (currently\r
+ // there is only EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID).\r
+ //\r
+ RegisterIndex = GetRegisterIndex (Register);\r
+ if (RegisterIndex == 0) {\r
+ return (Register < EFI_SMM_SAVE_STATE_REGISTER_IO ?\r
+ EFI_NOT_FOUND :\r
+ EFI_UNSUPPORTED);\r
+ }\r
+\r
+ CpuSaveState = (QEMU_SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+\r
+ //\r
+ // Do not write non-writable SaveState, because it will cause exception.\r
+ //\r
+ if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {\r
+ return EFI_UNSUPPORTED;\r
+ }\r
+\r
+ //\r
+ // Check CPU mode\r
+ //\r
+ if ((CpuSaveState->x86.SMMRevId & 0xFFFF) == 0) {\r
+ //\r
+ // If 32-bit mode width is zero, then the specified register can not be\r
+ // accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 32-bit mode width, then the specified\r
+ // register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+ //\r
+ // Write SMM State register\r
+ //\r
+ ASSERT (CpuSaveState != NULL);\r
+ CopyMem (\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32,\r
+ Buffer,\r
+ Width\r
+ );\r
+ } else {\r
+ //\r
+ // If 64-bit mode width is zero, then the specified register can not be\r
+ // accessed\r
+ //\r
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {\r
+ return EFI_NOT_FOUND;\r
+ }\r
+\r
+ //\r
+ // If Width is bigger than the 64-bit mode width, then the specified\r
+ // register can not be accessed\r
+ //\r
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // Write lower 32-bits of SMM State register\r
+ //\r
+ CopyMem (\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo,\r
+ Buffer,\r
+ MIN (4, Width)\r
+ );\r
+ if (Width >= 4) {\r
+ //\r
+ // Write upper 32-bits of SMM State register\r
+ //\r
+ CopyMem (\r
+ (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi,\r
+ (UINT8 *)Buffer + 4,\r
+ Width - 4\r
+ );\r
+ }\r
+ }\r
+ return EFI_SUCCESS;\r
}\r
\r
/**\r
}\r
\r
/**\r
- This API provides a method for a CPU to allocate a specific region for storing page tables.\r
+ This API provides a method for a CPU to allocate a specific region for\r
+ storing page tables.\r
\r
This API can be called more once to allocate memory for page tables.\r
\r
- Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
- allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
- is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
- returned.\r
+ Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns\r
+ a pointer to the allocated buffer. The buffer returned is aligned on a 4KB\r
+ boundary. If Pages is 0, then NULL is returned. If there is not enough\r
+ memory remaining to satisfy the request, then NULL is returned.\r
\r
- This function can also return NULL if there is no preference on where the page tables are allocated in SMRAM.\r
+ This function can also return NULL if there is no preference on where the\r
+ page tables are allocated in SMRAM.\r
\r
@param Pages The number of 4 KB pages to allocate.\r
\r
@return A pointer to the allocated buffer for page tables.\r
@retval NULL Fail to allocate a specific region for storing page tables,\r
- Or there is no preference on where the page tables are allocated in SMRAM.\r
+ Or there is no preference on where the page tables are\r
+ allocated in SMRAM.\r
\r
**/\r
VOID *\r