X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=UefiCpuPkg%2FPiSmmCpuDxeSmm%2FPiSmmCpuDxeSmm.c;h=670a5cf663f8c573a65aa061caaf7176743a9d7f;hb=b095a5403b1dd2ad7af153069f3729716a864e80;hp=e210c8d44659357e02e6989021148e57842e287c;hpb=9838b0161d3d1dfc9903f24ea5dc993dd0792325;p=mirror_edk2.git
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index e210c8d446..670a5cf663 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -76,13 +76,6 @@ EFI_SMM_CPU_PROTOCOL mSmmCpu = {
 
 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
 
-///
-/// SMM CPU Save State Protocol instance
-///
-EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = {
-  NULL
-};
-
 //
 // SMM stack information
 //
@@ -530,18 +523,13 @@ SmmRestoreCpu (
   }
 
   //
-  // Do below CPU things for native platform only
+  // Skip initialization if mAcpiCpuData is not valid
   //
-  if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
+  if (mAcpiCpuData.NumberOfCpus > 0) {
     //
-    // Skip initialization if mAcpiCpuData is not valid
+    // First time microcode load and restore MTRRs
     //
-    if (mAcpiCpuData.NumberOfCpus > 0) {
-      //
-      // First time microcode load and restore MTRRs
-      //
-      EarlyInitializeCpu ();
-    }
+    EarlyInitializeCpu ();
   }
 
   //
@@ -550,18 +538,13 @@ SmmRestoreCpu (
   SmmRelocateBases ();
 
   //
-  // Do below CPU things for native platform only
+  // Skip initialization if mAcpiCpuData is not valid
   //
-  if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
+  if (mAcpiCpuData.NumberOfCpus > 0) {
     //
-    // Skip initialization if mAcpiCpuData is not valid
+    // Restore MSRs for BSP and all APs
     //
-    if (mAcpiCpuData.NumberOfCpus > 0) {
-      //
-      // Restore MSRs for BSP and all APs
-      //
-      InitializeCpu ();
-    }
+    InitializeCpu ();
   }
 
   //
@@ -686,13 +669,6 @@ SmmReadyToLockEventNotify (
   //
   mAcpiCpuData.NumberOfCpus = 0;
 
-  //
-  // If FrameworkCompatibilitySspport is enabled, then do not copy CPU S3 Data into SMRAM
-  //
-  if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
-    goto Done;
-  }
-
   //
   // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
   //
@@ -784,6 +760,9 @@ PiCpuSmmEntry (
   UINTN NumberOfEnabledProcessors;
   UINTN Index;
   VOID *Buffer;
+  UINTN BufferPages;
+  UINTN TileCodeSize;
+  UINTN TileDataSize;
   UINTN TileSize;
   VOID *GuidHob;
   EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
@@ -961,9 +940,13 @@
   // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size
   // is rounded up to nearest power of 2.
   //
-  TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1;
+  TileCodeSize = GetSmiHandlerSize ();
+  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
+  TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);
+  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
+  TileSize = TileDataSize + TileCodeSize - 1;
   TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
-  DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize));
+  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
 
   //
   // If the TileSize is larger than space available for the SMI Handler of CPU[i],
@@ -985,12 +968,14 @@
   // Intel486 processors: FamilyId is 4
   // Pentium processors : FamilyId is 5
   //
+  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
   if ((FamilyId == 4) || (FamilyId == 5)) {
-    Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB);
+    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
   } else {
-    Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));
+    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
   }
   ASSERT (Buffer != NULL);
+  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
 
   //
   // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
@@ -1009,7 +994,6 @@
 
   mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
   mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
-  mSmmCpuSaveState.CpuSaveState = (EFI_SMM_CPU_STATE **)gSmmCpuPrivate->CpuSaveState;
 
   //
   // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
@@ -1150,25 +1134,6 @@
   Status = InitializeSmmCpuServices (mSmmCpuHandle);
   ASSERT_EFI_ERROR (Status);
 
-  if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
-    //
-    // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility
-    //
-    Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
-               &gSmmCpuPrivate->SmmCpuHandle,
-               &gEfiSmmCpuSaveStateProtocolGuid,
-               &mSmmCpuSaveState,
-               NULL
-               );
-    ASSERT_EFI_ERROR (Status);
-    //
-    // The SmmStartupThisAp service in Framework SMST should always be non-null.
-    // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk
-    // can have it ready when constructing Framework SMST.
-    //
-    gSmst->SmmStartupThisAp = SmmStartupThisAp;
-  }
-
   //
   // register SMM Ready To Lock Protocol notification
   //
@@ -1353,9 +1318,9 @@ ConfigSmmCodeAccessCheckOnCurrentProcessor (
   NewSmmFeatureControlMsr = SmmFeatureControlMsr;
   if (mSmmCodeAccessCheckEnable) {
     NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
-  }
-  if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
-    NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
+    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
+    }
   }
 
   //
@@ -1386,7 +1351,7 @@ ConfigSmmCodeAccessCheck (
   //
   // Check to see if the Feature Control MSR is supported on this CPU
   //
-  Index = gSmst->CurrentlyExecutingCpu;
+  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
   if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
     mSmmCodeAccessCheckEnable = FALSE;
     return;
   }
@@ -1398,13 +1363,6 @@
   //
   if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
     mSmmCodeAccessCheckEnable = FALSE;
-  }
-
-  //
-  // If the SMM Code Access Check feature is disabled and the Feature Control MSR
-  // is not being locked, then no additional work is required
-  //
-  if (!mSmmCodeAccessCheckEnable && !FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
     return;
   }
 
@@ -1428,7 +1386,7 @@
   // Enable SMM Code Access Check feature for the APs.
   //
   for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
-    if (Index != gSmst->CurrentlyExecutingCpu) {
+    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
 
       //
       // Acquire Config SMM Code Access Check spin lock. The AP will release the
@@ -1488,3 +1446,25 @@ PerformRemainingTasks (
     mSmmReadyToLock = FALSE;
   }
 }
+
+/**
+  Perform the pre tasks.
+
+**/
+VOID
+PerformPreTasks (
+  VOID
+  )
+{
+  //
+  // Restore SMM Configuration in S3 boot path.
+  //
+  if (mRestoreSmmConfigurationInS3) {
+    //
+    // Configure SMM Code Access Check feature if available.
+    //
+    ConfigSmmCodeAccessCheck ();
+
+    mRestoreSmmConfigurationInS3 = FALSE;
+  }
+}
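
Worked example (illustrative only, not part of the patch): the reworked tile-size arithmetic in PiCpuSmmEntry() aligns the SMI handler code size and the save-state/descriptor data size to 4 KB separately, then rounds their sum up to the next power of two, presumably so the code and data pieces of each per-CPU tile can later be given different page-level protections. The sketch below repeats that arithmetic in standalone C; AlignUp() and HighestPowerOfTwo32() are hypothetical stand-ins for edk2's ALIGN_VALUE() and GetPowerOfTwo32(), and the input sizes are made-up values, not the real GetSmiHandlerSize() or structure sizes.

/* tile_size_demo.c - standalone sketch of the tile-size rounding used above. */
#include <stdint.h>
#include <stdio.h>

#define SIZE_4KB  0x1000u

/* Round Value up to a multiple of Alignment (Alignment must be a power of two). */
static uint32_t AlignUp (uint32_t Value, uint32_t Alignment)
{
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

/* Largest power of two that is <= Value (returns 0 for Value == 0). */
static uint32_t HighestPowerOfTwo32 (uint32_t Value)
{
  uint32_t Bit;

  for (Bit = 0x80000000u; Bit != 0 && (Bit & Value) == 0; Bit >>= 1) {
  }
  return Bit;
}

int main (void)
{
  /* Made-up inputs standing in for GetSmiHandlerSize () and
     sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR). */
  uint32_t SmiHandlerSize = 0x0400;
  uint32_t SaveStateSize  = 0x0D00;

  uint32_t TileCodeSize = AlignUp (SmiHandlerSize, SIZE_4KB);   /* 0x1000 */
  uint32_t TileDataSize = AlignUp (SaveStateSize, SIZE_4KB);    /* 0x1000 */

  /* Same rounding as the patched code: next power of two >= code + data. */
  uint32_t TileSize = TileDataSize + TileCodeSize - 1;          /* 0x1FFF */
  TileSize = 2 * HighestPowerOfTwo32 (TileSize);                /* 0x2000 */

  printf ("TileSize = 0x%08x (code 0x%08x, data 0x%08x)\n",
          (unsigned) TileSize, (unsigned) TileCodeSize, (unsigned) TileDataSize);
  return 0;
}

For these made-up inputs the sketch prints "TileSize = 0x00002000 (code 0x00001000, data 0x00001000)", which mirrors what the new DEBUG line in the patch would report for the same values.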