/** @file\r
SMM MP service implementation\r
\r
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
UINT64 gPhyMask;\r
SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
UINTN mSmmMpSyncDataSize;\r
+SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
+UINTN mSemaphoreSize;\r
+SPIN_LOCK *mPFLock = NULL;\r
\r
/**\r
Performs an atomic compare exchange operation to acquire the semaphore.\r
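\r
  A minimal sketch of such an acquire (an assumption based on common\r
  implementations, not necessarily this module's exact code):\r
\r
    UINT32\r
    WaitForSemaphore (\r
      IN OUT volatile UINT32  *Sem\r
      )\r
    {\r
      UINT32  Value;\r
\r
      do {\r
        Value = *Sem;\r
      } while (Value == 0 ||\r
               InterlockedCompareExchange32 (\r
                 (UINT32 *)Sem, Value, Value - 1\r
                 ) != Value);\r
      return Value - 1;\r
    }\r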
SMM_CPU_DATA_BLOCK *CpuData;\r
EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
- if (mSmmMpSyncData->Counter == mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
return TRUE;\r
}\r
\r
UINT64 Timer;\r
UINTN Index;\r
\r
- ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
\r
//\r
// Platform implementor should choose a timeout value appropriately:\r
// - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
// In both cases, adding SMI-disabling checking code increases overhead.\r
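// (One plausible implementation, assumed here for illustration: the budget\r
// comes from PcdCpuSmmApSyncTimeout in microseconds, converted once into TSC\r
// ticks, so StartSyncTimer () records AsmReadTsc () and IsSyncTimerTimeout ()\r
// checks the elapsed ticks against it.)\r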
//\r
- if (mSmmMpSyncData->Counter < mNumberOfCpus) {\r
+ if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
//\r
// Send SMI IPIs to bring outside processors in\r
//\r
//\r
// Flag BSP's presence\r
//\r
- mSmmMpSyncData->InsideSmm = TRUE;\r
+ *mSmmMpSyncData->InsideSmm = TRUE;\r
\r
//\r
// Initialize Debug Agent to start source level debug in BSP handler\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
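+\r
+ //\r
+ // LockdownSemaphore is assumed to atomically swap the counter to a\r
+ // sentinel ((UINT32)-1) so no more APs can check in, returning the value\r
+ // it held; a minimal sketch:\r
+ //\r
+ //   UINT32\r
+ //   LockdownSemaphore (\r
+ //     IN OUT volatile UINT32  *Sem\r
+ //     )\r
+ //   {\r
+ //     UINT32  Value;\r
+ //\r
+ //     do {\r
+ //       Value = *Sem;\r
+ //     } while (InterlockedCompareExchange32 (\r
+ //                (UINT32 *)Sem, Value, (UINT32)-1\r
+ //                ) != Value);\r
+ //     return Value;\r
+ //   }\r
+ //\r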
\r
//\r
// Wait for all APs to get ready for programming MTRRs\r
AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
\r
//\r
- // Restore SMM Configuration in S3 boot path.\r
+ // Perform the pre tasks\r
//\r
- if (mRestoreSmmConfigurationInS3) {\r
- //\r
- // Configure SMM Code Access Check feature if available.\r
- //\r
- ConfigSmmCodeAccessCheck ();\r
- mRestoreSmmConfigurationInS3 = FALSE;\r
- }\r
+ PerformPreTasks ();\r
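+\r
+ //\r
+ // PerformPreTasks presumably absorbs the S3 restore step that used to live\r
+ // here; a sketch reconstructed from the removed code:\r
+ //\r
+ //   VOID\r
+ //   PerformPreTasks (\r
+ //     VOID\r
+ //     )\r
+ //   {\r
+ //     //\r
+ //     // Restore SMM Configuration in S3 boot path.\r
+ //     //\r
+ //     if (mRestoreSmmConfigurationInS3) {\r
+ //       //\r
+ //       // Configure SMM Code Access Check feature if available.\r
+ //       //\r
+ //       ConfigSmmCodeAccessCheck ();\r
+ //       mRestoreSmmConfigurationInS3 = FALSE;\r
+ //     }\r
+ //   }\r
+ //\r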
\r
//\r
// Invoke SMM Foundation EntryPoint with the processor information context.\r
//\r
// Lock the counter down and retrieve the number of APs\r
//\r
- mSmmMpSyncData->AllCpusInSync = TRUE;\r
- ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
+ *mSmmMpSyncData->AllCpusInSync = TRUE;\r
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
//\r
// Make sure all APs have their Present flag set\r
//\r
//\r
// Notify all APs to exit\r
//\r
- mSmmMpSyncData->InsideSmm = FALSE;\r
+ *mSmmMpSyncData->InsideSmm = FALSE;\r
ReleaseAllAPs ();\r
\r
//\r
//\r
// Allow APs to check in from this point on\r
//\r
- mSmmMpSyncData->Counter = 0;\r
- mSmmMpSyncData->AllCpusInSync = FALSE;\r
+ *mSmmMpSyncData->Counter = 0;\r
+ *mSmmMpSyncData->AllCpusInSync = FALSE;\r
}\r
\r
/**\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// BSP timeout in the first round\r
//\r
//\r
for (Timer = StartSyncTimer ();\r
!IsSyncTimerTimeout (Timer) &&\r
- !mSmmMpSyncData->InsideSmm;\r
+ !(*mSmmMpSyncData->InsideSmm);\r
) {\r
CpuPause ();\r
}\r
\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
//\r
// Give up since BSP is unable to enter SMM\r
// and signal the completion of this AP\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
} else {\r
//\r
// Don't know BSP index. Give up without sending IPI to BSP.\r
//\r
- WaitForSemaphore (&mSmmMpSyncData->Counter);\r
+ WaitForSemaphore (mSmmMpSyncData->Counter);\r
return;\r
}\r
}\r
//\r
// Check if BSP wants to exit SMM\r
//\r
- if (!mSmmMpSyncData->InsideSmm) {\r
+ if (!(*mSmmMpSyncData->InsideSmm)) {\r
break;\r
}\r
\r
Create 4G PageTable in SMRAM.\r
\r
@param ExtraPages       Additional pages to allocate besides those needed for the 4G mapping\r
+ @param Is32BitPageTable Whether to build a 32-bit PAE page table\r
@return PageTable Address\r
\r
**/\r
UINT32\r
Gen4GPageTable (\r
- IN UINTN ExtraPages\r
+ IN UINTN ExtraPages,\r
+ IN BOOLEAN Is32BitPageTable\r
)\r
{\r
VOID *PageTable;\r
//\r
// Allocate the page table\r
//\r
- PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);\r
+ PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);\r
ASSERT (PageTable != NULL);\r
\r
PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));\r
// Set Page Directory Pointers\r
//\r
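// (A 32-bit PAE page table needs different PDPTE attributes: in IA32 PAE\r
// mode the four PDPTEs reserve the R/W and other attribute bits, so only the\r
// Present bit may be set there, while 4-level paging has no such\r
// restriction. That is presumably why Is32BitPageTable selects\r
// IA32_PAE_PDPTE_ATTRIBUTE_BITS instead of PAGE_ATTRIBUTE_BITS below.)\r
//\r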
for (Index = 0; Index < 4; Index++) {\r
- Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;\r
+ Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
}\r
Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
\r
// Fill in Page Directory Entries\r
//\r
for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
- Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
Pdpte = (UINT64*)PageTable;\r
for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
- Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;\r
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
//\r
// Fill in Page Table Entries\r
//\r
GuardPage = 0;\r
}\r
} else {\r
- Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
}\r
PageAddress += EFI_PAGE_SIZE;\r
}\r
//\r
// Allocate a page from SMRAM\r
//\r
- NewPageTableAddress = AllocatePages (1);\r
+ NewPageTableAddress = AllocatePageTableMemory (1);\r
ASSERT (NewPageTableAddress != NULL);\r
\r
NewPageTable = (UINT64 *)NewPageTableAddress;\r
NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
}\r
\r
- PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;\r
+ PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
return EFI_SUCCESS;\r
}\r
\r
+/**\r
+ This function sets DR6 & DR7 from the SMM save state, before running SMM C code.\r
+ It lets hardware breakpoints that were programmed outside of SMM remain\r
+ effective inside the SMM handler.\r
+\r
+ NOTE: This should not be used at runtime since it may conflict with OS\r
+ debugging facilities. Turn it off in RELEASE builds.\r
+\r
+ @param CpuIndex CPU Index\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+CpuSmmDebugEntry (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+ \r
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ AsmWriteDr6 (CpuSaveState->x86._DR6);\r
+ AsmWriteDr7 (CpuSaveState->x86._DR7);\r
+ } else {\r
+ AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
+ AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ This function saves DR6 & DR7 back into the SMM save state.\r
+\r
+ NOTE: This should not be used at runtime since it may conflict with OS\r
+ debugging facilities. Turn it off in RELEASE builds.\r
+\r
+ @param CpuIndex CPU Index\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+CpuSmmDebugExit (\r
+ IN UINTN CpuIndex\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
+ CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
+ CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
+ } else {\r
+ CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
+ CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
+ }\r
+ }\r
+}\r
+\r
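+//\r
+// (Both hooks are presumably invoked from the SMI entry stub around the\r
+// rendezvous, e.g. "call CpuSmmDebugEntry / call SmiRendezvous /\r
+// call CpuSmmDebugExit", so breakpoint state set up outside SMM survives\r
+// the C handler. This call-site detail is an assumption for illustration.)\r
+//\r
+\r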
/**\r
C function for SMI entry, each processor comes here upon SMI trigger.\r
\r
IN UINTN CpuIndex\r
)\r
{\r
- EFI_STATUS Status;\r
- BOOLEAN ValidSmi;\r
- BOOLEAN IsBsp;\r
- BOOLEAN BspInProgress;\r
- UINTN Index;\r
- UINTN Cr2;\r
+ EFI_STATUS Status;\r
+ BOOLEAN ValidSmi;\r
+ BOOLEAN IsBsp;\r
+ BOOLEAN BspInProgress;\r
+ UINTN Index;\r
+ UINTN Cr2;\r
+ BOOLEAN XdDisableFlag;\r
+ MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;\r
\r
//\r
// Save Cr2 because Page Fault exception in SMM may override its value\r
// Determine if BSP has been already in progress. Note this must be checked after\r
// ValidSmi because BSP may clear a valid SMI source after checking in.\r
//\r
- BspInProgress = mSmmMpSyncData->InsideSmm;\r
+ BspInProgress = *mSmmMpSyncData->InsideSmm;\r
\r
if (!BspInProgress && !ValidSmi) {\r
//\r
//\r
// Signal presence of this processor\r
//\r
- if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {\r
+ if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
//\r
// BSP has already ended the synchronization, so QUIT!!!\r
//\r
//\r
// Wait for BSP's signal to finish SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
}\r
goto Exit;\r
}\r
\r
//\r
- // Try to enable NX\r
+ // Try to enable XD\r
//\r
+ XdDisableFlag = FALSE;\r
if (mXdSupported) {\r
+ MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
+ if (MiscEnableMsr.Bits.XD == 1) {\r
+ XdDisableFlag = TRUE;\r
+ MiscEnableMsr.Bits.XD = 0;\r
+ AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
+ }\r
ActivateXd ();\r
}\r
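\r
  //\r
  // ActivateXd is assumed to set EFER.NXE (bit 11 of MSR 0xC0000080) when it\r
  // is clear, so the XD bit in page-table entries takes effect; a minimal\r
  // sketch, with MSR_EFER and MSR_EFER_XD as assumed local definitions:\r
  //\r
  //   VOID\r
  //   ActivateXd (\r
  //     VOID\r
  //     )\r
  //   {\r
  //     UINT64  Efer;\r
  //\r
  //     Efer = AsmReadMsr64 (MSR_EFER);\r
  //     if ((Efer & MSR_EFER_XD) == 0) {\r
  //       AsmWriteMsr64 (MSR_EFER, Efer | MSR_EFER_XD);\r
  //     }\r
  //   }\r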
\r
// BSP Handler is always called with a ValidSmi == TRUE\r
//\r
BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
-\r
} else {\r
APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
}\r
//\r
// Wait for BSP's signal to exit SMI\r
//\r
- while (mSmmMpSyncData->AllCpusInSync) {\r
+ while (*mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
+ }\r
+\r
+ //\r
+ // Restore XD\r
+ //\r
+ if (XdDisableFlag) {\r
+ MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
+ MiscEnableMsr.Bits.XD = 1;\r
+ AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
}\r
}\r
\r
AsmWriteCr2 (Cr2);\r
}\r
\r
+/**\r
+ Allocate buffer for all semaphores and spin locks.\r
+\r
+**/\r
+VOID\r
+InitializeSmmCpuSemaphores (\r
+ VOID\r
+ )\r
+{\r
+ UINTN ProcessorCount;\r
+ UINTN TotalSize;\r
+ UINTN GlobalSemaphoresSize;\r
+ UINTN SemaphoreSize;\r
+ UINTN Pages;\r
+ UINTN *SemaphoreBlock;\r
+ UINTN SemaphoreAddr;\r
+\r
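+ //\r
+ // GetSpinLockProperties () returns the platform-recommended spin lock\r
+ // size/alignment (typically one cache line). Giving every semaphore its\r
+ // own cache-line-sized slot keeps these heavily contended flags from\r
+ // sharing a line, avoiding false sharing between processors.\r
+ //\r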
+ SemaphoreSize = GetSpinLockProperties ();\r
+ ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
+ TotalSize = GlobalSemaphoresSize;\r
+ DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));\r
+ DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
+ Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
+ SemaphoreBlock = AllocatePages (Pages);\r
+ ASSERT (SemaphoreBlock != NULL);\r
+ ZeroMem (SemaphoreBlock, TotalSize);\r
+\r
+ SemaphoreAddr = (UINTN)SemaphoreBlock;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
+ SemaphoreAddr += SemaphoreSize;\r
+ mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
+ = (SPIN_LOCK *)SemaphoreAddr;\r
+\r
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
+ mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
+ mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
+\r
+ mSemaphoreSize = SemaphoreSize;\r
+}\r
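+\r
+//\r
+// For reference, the carve-up above implies a structure of per-semaphore\r
+// pointers along these lines (an assumed sketch; the real definition lives\r
+// in the module header):\r
+//\r
+//   typedef struct {\r
+//     volatile UINT32   *Counter;\r
+//     volatile BOOLEAN  *InsideSmm;\r
+//     volatile BOOLEAN  *AllCpusInSync;\r
+//     SPIN_LOCK         *PFLock;\r
+//     SPIN_LOCK         *CodeAccessCheckLock;\r
+//   } SMM_CPU_SEMAPHORE_GLOBAL;\r
+//\r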
\r
/**\r
Initialize un-cacheable data.\r
mSmmMpSyncData->BspIndex = (UINT32)-1;\r
}\r
mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
+\r
+ InitializeSmmCpuSemaphores ();\r
}\r
}\r
\r
UINTN Index;\r
MTRR_SETTINGS *Mtrr;\r
PROCESSOR_SMM_DESCRIPTOR *Psd;\r
- UINTN GdtTssTableSize;\r
UINT8 *GdtTssTables;\r
- IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
- UINTN TssBase;\r
UINTN GdtTableStepSize;\r
\r
+ //\r
+ // Initialize mSmmMpSyncData\r
+ //\r
+ mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
+ (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
+ ASSERT (mSmmMpSyncData != NULL);\r
+ InitializeMpSyncData ();\r
+\r
//\r
// Initialize physical address mask\r
// NOTE: Physical memory above virtual address limit is not supported !!!\r
//\r
Cr3 = SmmInitPageTable ();\r
\r
- GdtTssTables = NULL;\r
- GdtTssTableSize = 0;\r
- GdtTableStepSize = 0;\r
- //\r
- // For X64 SMM, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
- // on each SMI entry.\r
- //\r
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {\r
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned\r
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
- ASSERT (GdtTssTables != NULL);\r
- GdtTableStepSize = GdtTssTableSize;\r
-\r
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);\r
- if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- //\r
- // Setup top of known good stack as IST1 for each processor.\r
- //\r
- *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);\r
- }\r
- }\r
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
-\r
- //\r
- // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.\r
- // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
- // on each SMI entry.\r
- //\r
-\r
- //\r
- // Enlarge GDT to contain 2 TSS descriptors\r
- //\r
- gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));\r
-\r
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned\r
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
- ASSERT (GdtTssTables != NULL);\r
- GdtTableStepSize = GdtTssTableSize;\r
-\r
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);\r
- //\r
- // Fixup TSS descriptors\r
- //\r
- TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
- GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
-\r
- TssBase += TSS_SIZE;\r
- GdtDescriptor++;\r
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
- //\r
- // Fixup TSS segments\r
- //\r
- // ESP as known good stack\r
- //\r
- *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
- *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;\r
- }\r
- }\r
+ GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
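+\r
+ //\r
+ // InitGdt factors the per-CPU GDT/TSS setup removed above into an\r
+ // architecture-specific helper. A sketch of the X64 flavor, reconstructed\r
+ // from the removed code (Cr3 is unused there; IA32 needs it for the TSS):\r
+ //\r
+ //   UINT8 *\r
+ //   InitGdt (\r
+ //     IN  UINTN  Cr3,\r
+ //     OUT UINTN  *GdtStepSize\r
+ //     )\r
+ //   {\r
+ //     UINTN  Index;\r
+ //     UINTN  GdtTssTableSize;\r
+ //     UINT8  *GdtTssTables;\r
+ //\r
+ //     //\r
+ //     // Allocate a separate GDT+TSS per CPU to avoid TSS load contention\r
+ //     // on each SMI entry; round each copy up to 8-byte alignment.\r
+ //     //\r
+ //     GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7;\r
+ //     GdtTssTables    = (UINT8 *)AllocatePages (\r
+ //                         EFI_SIZE_TO_PAGES (\r
+ //                           GdtTssTableSize *\r
+ //                           gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ //     ASSERT (GdtTssTables != NULL);\r
+ //\r
+ //     for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ //       CopyMem (\r
+ //         GdtTssTables + GdtTssTableSize * Index,\r
+ //         (VOID *)(UINTN)gcSmiGdtr.Base,\r
+ //         gcSmiGdtr.Limit + 1 + TSS_SIZE\r
+ //         );\r
+ //       if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //         //\r
+ //         // Set top of known good stack as IST1 for this processor.\r
+ //         //\r
+ //         *(UINTN *)(GdtTssTables + GdtTssTableSize * Index +\r
+ //                    gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) =\r
+ //           mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
+ //       }\r
+ //     }\r
+ //\r
+ //     *GdtStepSize = GdtTssTableSize;\r
+ //     return GdtTssTables;\r
+ //   }\r
+ //\r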
\r
//\r
// Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) {\r
- //\r
- // For X64 SMM, set GDT to the copy allocated above.\r
- //\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- //\r
- // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.\r
- //\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
- }\r
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
+ Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
\r
//\r
// Install SMI handler\r
);\r
}\r
\r
- //\r
- // Initialize mSmmMpSyncData\r
- //\r
- mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
- (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
- mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
- ASSERT (mSmmMpSyncData != NULL);\r
- InitializeMpSyncData ();\r
-\r
//\r
// Record current MTRR settings\r
//\r