/** @file\r
SMM MP service implementation\r
\r
-Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
UINTN mSemaphoreSize;\r
SPIN_LOCK *mPFLock = NULL;\r
SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
+BOOLEAN mMachineCheckSupported = FALSE;\r
\r
/**\r
Performs an atomic compare exchange operation to get semaphore.\r
return TRUE;\r
}\r
\r
+/**\r
+  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.\r
+\r
+  @retval TRUE   The OS has enabled LMCE.\r
+  @retval FALSE  The OS has not enabled LMCE.\r
+\r
+**/\r
+BOOLEAN\r
+IsLmceOsEnabled (\r
+ VOID\r
+ )\r
+{\r
+ MSR_IA32_MCG_CAP_REGISTER McgCap;\r
+ MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;\r
+ MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;\r
+\r
+ McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);\r
+ if (McgCap.Bits.MCG_LMCE_P == 0) {\r
+ return FALSE;\r
+ }\r
+\r
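+  //\r
+  // IA32_FEATURE_CONTROL.LMCE_ON (bit 20) is the firmware opt-in that\r
+  // permits the OS to enable LMCE.\r
+  //\r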
+ FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);\r
+ if (FeatureCtrl.Bits.LmceOn == 0) {\r
+ return FALSE;\r
+ }\r
+\r
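+  //\r
+  // MSR_IA32_MCG_EXT_CTL only exists when MCG_CAP.MCG_LMCE_P is set (checked\r
+  // above); LMCE_EN (bit 0) is the OS opt-in.\r
+  //\r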
+ McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);\r
+ return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);\r
+}\r
+\r
+/**\r
+  Return whether a local machine check exception (LMCE) has been signaled.\r
+\r
+  MCG_STATUS.LMCE_S, when set, indicates that the current machine-check event\r
+  was delivered to only this logical processor rather than broadcast to all\r
+  logical processors.\r
+\r
+ @retval TRUE LMCE was signaled.\r
+ @retval FALSE LMCE was not signaled.\r
+\r
+**/\r
+BOOLEAN\r
+IsLmceSignaled (\r
+ VOID\r
+ )\r
+{\r
+ MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
+\r
+ McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);\r
+ return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);\r
+}\r
\r
/**\r
  Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before\r
{\r
UINT64 Timer;\r
UINTN Index;\r
+ BOOLEAN LmceEn;\r
+ BOOLEAN LmceSignal;\r
\r
ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
+ LmceEn = FALSE;\r
+ LmceSignal = FALSE;\r
+ if (mMachineCheckSupported) {\r
+ LmceEn = IsLmceOsEnabled ();\r
+    LmceSignal = IsLmceSignaled ();\r
+ }\r
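+  //\r
+  // If the OS has enabled LMCE and a local machine check has been signaled to\r
+  // this processor only, the other processors may be handling machine checks\r
+  // of their own; do not wait for them (see the first sync-loop condition\r
+  // below).\r
+  //\r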
+\r
//\r
// Platform implementor should choose a timeout value appropriately:\r
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note\r
// Sync with APs 1st timeout\r
//\r
for (Timer = StartSyncTimer ();\r
- !IsSyncTimerTimeout (Timer) &&\r
+ !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&\r
!AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
) {\r
CpuPause ();\r
//\r
// The BUSY lock is initialized to Acquired state\r
//\r
- AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
\r
//\r
// Perform the pre tasks\r
// Set Page Directory Pointers\r
//\r
for (Index = 0; Index < 4; Index++) {\r
- Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
+ Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |\r
+ (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
}\r
Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
\r
// Fill in Page Directory Entries\r
//\r
for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
- Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
+ Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
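  //\r
  // mAddressEncMask is OR'ed into each entry above; it is assumed to hold the\r
  // AMD SEV memory encryption mask (zero when SEV is not active) and to be\r
  // initialized once elsewhere in this driver, for example:\r
  //\r
  //   mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
  //                     PAGING_1G_ADDRESS_MASK_64;\r
  //\r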
\r
+ Pdpte = (UINT64*)PageTable;\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
- Pdpte = (UINT64*)PageTable;\r
for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
- Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
- Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
+ Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
//\r
// Fill in Page Table Entries\r
//\r
//\r
// Mark the guard page as non-present\r
//\r
- Pte[Index] = PageAddress;\r
+ Pte[Index] = PageAddress | mAddressEncMask;\r
GuardPage += mSmmStackSize;\r
if (GuardPage > mSmmStackArrayEnd) {\r
GuardPage = 0;\r
}\r
} else {\r
- Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
PageAddress+= EFI_PAGE_SIZE;\r
}\r
}\r
}\r
\r
- return (UINT32)(UINTN)PageTable;\r
-}\r
-\r
-/**\r
- Set memory cache ability.\r
-\r
- @param PageTable PageTable Address\r
- @param Address Memory Address to change cache ability\r
- @param Cacheability Cache ability to set\r
-\r
-**/\r
-VOID\r
-SetCacheability (\r
- IN UINT64 *PageTable,\r
- IN UINTN Address,\r
- IN UINT8 Cacheability\r
- )\r
-{\r
- UINTN PTIndex;\r
- VOID *NewPageTableAddress;\r
- UINT64 *NewPageTable;\r
- UINTN Index;\r
-\r
- ASSERT ((Address & EFI_PAGE_MASK) == 0);\r
-\r
- if (sizeof (UINTN) == sizeof (UINT64)) {\r
- PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
- }\r
-\r
- PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
-\r
- //\r
- // A perfect implementation should check the original cacheability with the\r
- // one being set, and break a 2M page entry into pieces only when they\r
- // disagreed.\r
- //\r
- PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;\r
- if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {\r
- //\r
- // Allocate a page from SMRAM\r
- //\r
- NewPageTableAddress = AllocatePageTableMemory (1);\r
- ASSERT (NewPageTableAddress != NULL);\r
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {\r
+ Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ if ((Pte[0] & IA32_PG_PS) == 0) {\r
+      // 4K-page entries are already mapped. Just hide the first one (page 0).\r
+ Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
+ Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0\r
+ } else {\r
+ // Create 4K-page entries\r
+ Pages = (UINTN)AllocatePageTableMemory (1);\r
+ ASSERT (Pages != 0);\r
\r
- NewPageTable = (UINT64 *)NewPageTableAddress;\r
+ Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
\r
- for (Index = 0; Index < 0x200; Index++) {\r
- NewPageTable[Index] = PageTable[PTIndex];\r
- if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {\r
- NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);\r
- NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;\r
+ Pte = (UINT64*)Pages;\r
+ PageAddress = 0;\r
+      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (not present); the remaining entries stay present\r
+ for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
+ PageAddress += EFI_PAGE_SIZE;\r
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
}\r
- NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
}\r
-\r
- PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
}\r
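  //\r
  // BIT1 of PcdNullPointerDetectionPropertyMask enables NULL pointer detection\r
  // for SMM: page 0 is marked not-present so that a NULL dereference inside\r
  // SMM raises a page fault.\r
  //\r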
\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
-\r
- PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;\r
- ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
- PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));\r
- PageTable[PTIndex] |= (UINT64)Cacheability;\r
+ return (UINT32)(UINTN)PageTable;\r
}\r
\r
/**\r
\r
@param[in] Procedure The address of the procedure to run\r
@param[in] CpuIndex Target CPU Index\r
- @param[in, OUT] ProcArguments The parameter to pass to the procedure\r
+ @param[in, out] ProcArguments The parameter to pass to the procedure\r
@param[in] BlockingMode Startup AP in blocking mode or not\r
\r
@retval EFI_INVALID_PARAMETER CpuNumber not valid\r
DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
return EFI_INVALID_PARAMETER;\r
}\r
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
)\r
{\r
SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
- \r
+\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
ASSERT(CpuIndex < mMaxNumberOfCpus);\r
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
ASSERT(CpuIndex < mMaxNumberOfCpus);\r
\r
//\r
- // Save Cr2 because Page Fault exception in SMM may override its value\r
+ // Save Cr2 because Page Fault exception in SMM may override its value,\r
+ // when using on-demand paging for above 4G memory.\r
//\r
- Cr2 = AsmReadCr2 ();\r
+ Cr2 = 0;\r
+ SaveCr2 (&Cr2);\r
\r
//\r
// Perform CPU specific entry hooks\r
\r
Exit:\r
SmmCpuFeaturesRendezvousExit (CpuIndex);\r
+\r
//\r
// Restore Cr2\r
//\r
- AsmWriteCr2 (Cr2);\r
+ RestoreCr2 (Cr2);\r
}\r
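\r
//\r
// SaveCr2 ()/RestoreCr2 () are per-architecture helpers. A minimal sketch,\r
// assuming (as on X64) that CR2 only needs to be preserved when static paging\r
// is disabled, so a page fault may legally occur while demand-paging SMM\r
// accesses above 4GB (mCpuSmmStaticPageTable is an assumed module global):\r
//\r
//   VOID\r
//   SaveCr2 (\r
//     OUT UINTN  *Cr2\r
//     )\r
//   {\r
//     if (!mCpuSmmStaticPageTable) {\r
//       *Cr2 = AsmReadCr2 ();\r
//     }\r
//   }\r
//\r
//   VOID\r
//   RestoreCr2 (\r
//     IN UINTN  Cr2\r
//     )\r
//   {\r
//     if (!mCpuSmmStaticPageTable) {\r
//       AsmWriteCr2 (Cr2);\r
//     }\r
//   }\r
//\r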
\r
/**\r
UINTN TotalSize;\r
UINTN GlobalSemaphoresSize;\r
UINTN CpuSemaphoresSize;\r
- UINTN MsrSemahporeSize;\r
UINTN SemaphoreSize;\r
UINTN Pages;\r
UINTN *SemaphoreBlock;\r
ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
- MsrSemahporeSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;\r
- TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemahporeSize;\r
+ TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
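+  //\r
+  // Each lock/flag below is padded to SemaphoreSize bytes (assumed to come\r
+  // from GetSpinLockProperties ()) so that no two semaphores share a cache\r
+  // line, avoiding false sharing between processors.\r
+  //\r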
DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
= (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += SemaphoreSize;\r
- mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock\r
- = (SPIN_LOCK *)SemaphoreAddr;\r
\r
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
\r
- SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;\r
- mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;\r
- mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =\r
- ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;\r
- ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);\r
-\r
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
- mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;\r
\r
mSemaphoreSize = SemaphoreSize;\r
}\r
/**\r
Initialize global data for MP synchronization.\r
\r
- @param Stacks Base address of SMI stack buffer for all processors.\r
- @param StackSize Stack size for each processor in SMM.\r
+ @param Stacks Base address of SMI stack buffer for all processors.\r
+ @param StackSize Stack size for each processor in SMM.\r
+ @param ShadowStackSize Shadow Stack size for each processor in SMM.\r
\r
**/\r
UINT32\r
InitializeMpServiceData (\r
IN VOID *Stacks,\r
- IN UINTN StackSize\r
+ IN UINTN StackSize,\r
+ IN UINTN ShadowStackSize\r
)\r
{\r
UINT32 Cr3;\r
UINTN Index;\r
- PROCESSOR_SMM_DESCRIPTOR *Psd;\r
UINT8 *GdtTssTables;\r
UINTN GdtTableStepSize;\r
+ CPUID_VERSION_INFO_EDX RegEdx;\r
+\r
+ //\r
+ // Determine if this CPU supports machine check\r
+ //\r
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
+ mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);\r
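+  // (MCA is CPUID.01H:EDX[14], the Machine Check Architecture flag.)\r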
\r
//\r
// Allocate memory for all locks and semaphores\r
GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
\r
//\r
- // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
+ // Install SMI handler for each CPU\r
//\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
- Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
- CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
-\r
- //\r
- // Install SMI handler\r
- //\r
InstallSmiHandler (\r
Index,\r
(UINT32)mCpuHotPlugData.SmBase[Index],\r
- (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
+ (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
StackSize,\r
- (UINTN)Psd->SmmGdtPtr,\r
- Psd->SmmGdtSize,\r
+ (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
+ gcSmiGdtr.Limit + 1,\r
gcSmiIdtr.Base,\r
gcSmiIdtr.Limit + 1,\r
Cr3\r