REGISTER_TYPE in UefiCpuPkg/Include/AcpiCpuData.h defines a MemoryMapped
enum value. However support for the MemoryMapped enum is missing from
the implementation of SetProcessorRegister(). This patch adds support
for the MemoryMapped type to SetProcessorRegister().
One spin lock is added to avoid a potential conflict when multiple processors
update the same memory space.
Cc: Michael Kinney <michael.d.kinney@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Feng Tian <feng.tian@intel.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
UINTN LongJumpOffset;\r
} MP_ASSEMBLY_ADDRESS_MAP;\r
\r
UINTN LongJumpOffset;\r
} MP_ASSEMBLY_ADDRESS_MAP;\r
\r
+//\r
+// Spin lock used to serialize MemoryMapped operation\r
+//\r
+SPIN_LOCK *mMemoryMappedLock = NULL;\r
+\r
/**\r
Get starting address and size of the rendezvous entry for APs.\r
Information for fixing a jump instruction in the code is also returned.\r
/**\r
Get starting address and size of the rendezvous entry for APs.\r
Information for fixing a jump instruction in the code is also returned.\r
+ // MemoryMapped operations\r
+ //\r
+ case MemoryMapped:\r
+ AcquireSpinLock (mMemoryMappedLock);\r
+ MmioBitFieldWrite32 (\r
+ RegisterTableEntry->Index,\r
+ RegisterTableEntry->ValidBitStart,\r
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,\r
+ (UINT32)RegisterTableEntry->Value\r
+ );\r
+ ReleaseSpinLock (mMemoryMappedLock);\r
+ break;\r
+ //\r
// Enable or disable cache\r
//\r
case CacheControl:\r
// Enable or disable cache\r
//\r
case CacheControl:\r
SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
= (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
= (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock\r
+ = (SPIN_LOCK *)SemaphoreAddr;\r
+\r
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
\r
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
\r
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
+ mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;\r
\r
mSemaphoreSize = SemaphoreSize;\r
}\r
\r
mSemaphoreSize = SemaphoreSize;\r
}\r
\r
DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
\r
\r
DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
\r
+ InitializeSpinLock (mMemoryMappedLock);\r
+\r
//\r
// See if there is enough context to resume PEI Phase\r
//\r
//\r
// See if there is enough context to resume PEI Phase\r
//\r
volatile BOOLEAN *AllCpusInSync;\r
SPIN_LOCK *PFLock;\r
SPIN_LOCK *CodeAccessCheckLock;\r
volatile BOOLEAN *AllCpusInSync;\r
SPIN_LOCK *PFLock;\r
SPIN_LOCK *CodeAccessCheckLock;\r
+ SPIN_LOCK *MemoryMappedLock;\r
} SMM_CPU_SEMAPHORE_GLOBAL;\r
\r
///\r
} SMM_CPU_SEMAPHORE_GLOBAL;\r
\r
///\r
extern UINTN mSemaphoreSize;\r
extern SPIN_LOCK *mPFLock;\r
extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;\r
extern UINTN mSemaphoreSize;\r
extern SPIN_LOCK *mPFLock;\r
extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;\r
+extern SPIN_LOCK *mMemoryMappedLock;\r
\r
/**\r
Create 4G PageTable in SMRAM.\r
\r
/**\r
Create 4G PageTable in SMRAM.\r