Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData
= {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE
, // Signature
20 NULL
, // Pointer to ProcessorInfo array
21 NULL
, // Pointer to Operation array
22 NULL
, // Pointer to CpuSaveStateSize array
23 NULL
, // Pointer to CpuSaveState array
26 }, // SmmReservedSmramRegion
28 SmmStartupThisAp
, // SmmCoreEntryContext.SmmStartupThisAp
29 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
30 0, // SmmCoreEntryContext.NumberOfCpus
31 NULL
, // SmmCoreEntryContext.CpuSaveStateSize
32 NULL
// SmmCoreEntryContext.CpuSaveState
36 mSmmCpuPrivateData
.SmmReservedSmramRegion
, // SmmConfiguration.SmramReservedRegions
37 RegisterSmmEntry
// SmmConfiguration.RegisterSmmEntry
39 NULL
, // pointer to Ap Wrapper Func array
40 { NULL
, NULL
}, // List_Entry for Tokens.
43 CPU_HOT_PLUG_DATA mCpuHotPlugData
= {
44 CPU_HOT_PLUG_DATA_REVISION_1
, // Revision
45 0, // Array Length of SmBase and APIC ID
46 NULL
, // Pointer to APIC ID array
47 NULL
, // Pointer to SMBASE array
54 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
56 SMM_CPU_PRIVATE_DATA
*gSmmCpuPrivate
= &mSmmCpuPrivateData
;
59 // SMM Relocation variables
61 volatile BOOLEAN
*mRebased
;
64 /// Handle for the SMM CPU Protocol
66 EFI_HANDLE mSmmCpuHandle
= NULL
;
69 /// SMM CPU Protocol instance
71 EFI_SMM_CPU_PROTOCOL mSmmCpu
= {
77 /// SMM Memory Attribute Protocol instance
79 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute
= {
80 EdkiiSmmGetMemoryAttributes
,
81 EdkiiSmmSetMemoryAttributes
,
82 EdkiiSmmClearMemoryAttributes
85 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable
[EXCEPTION_VECTOR_NUMBER
];
87 BOOLEAN mSmmRelocated
= FALSE
;
88 volatile BOOLEAN
*mSmmInitialized
= NULL
;
89 UINT32 mBspApicId
= 0;
92 // SMM stack information
94 UINTN mSmmStackArrayBase
;
95 UINTN mSmmStackArrayEnd
;
98 UINTN mSmmShadowStackSize
;
99 BOOLEAN mCetSupported
= TRUE
;
101 UINTN mMaxNumberOfCpus
= 1;
102 UINTN mNumberOfCpus
= 1;
105 // SMM ready to lock flag
107 BOOLEAN mSmmReadyToLock
= FALSE
;
110 // Global used to cache PCD for SMM Code Access Check enable
112 BOOLEAN mSmmCodeAccessCheckEnable
= FALSE
;
115 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
117 UINT64 mAddressEncMask
= 0;
120 // Spin lock used to serialize setting of SMM Code Access Check feature
122 SPIN_LOCK
*mConfigSmmCodeAccessCheckLock
= NULL
;
125 // Saved SMM ranges information
127 EFI_SMRAM_DESCRIPTOR
*mSmmCpuSmramRanges
;
128 UINTN mSmmCpuSmramRangeCount
;
130 UINT8 mPhysicalAddressBits
;
// Control register contents saved for SMM S3 resume state initialization.
139 Initialize IDT to setup exception handlers for SMM.
148 BOOLEAN InterruptState
;
149 IA32_DESCRIPTOR DxeIdtr
;
152 // There are 32 (not 255) entries in it since only processor
153 // generated exceptions will be handled.
155 gcSmiIdtr
.Limit
= (sizeof (IA32_IDT_GATE_DESCRIPTOR
) * 32) - 1;
157 // Allocate page aligned IDT, because it might be set as read only.
159 gcSmiIdtr
.Base
= (UINTN
)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr
.Limit
+ 1));
160 ASSERT (gcSmiIdtr
.Base
!= 0);
161 ZeroMem ((VOID
*)gcSmiIdtr
.Base
, gcSmiIdtr
.Limit
+ 1);
164 // Disable Interrupt and save DXE IDT table
166 InterruptState
= SaveAndDisableInterrupts ();
167 AsmReadIdtr (&DxeIdtr
);
169 // Load SMM temporary IDT table
171 AsmWriteIdtr (&gcSmiIdtr
);
173 // Setup SMM default exception handlers, SMM IDT table
174 // will be updated and saved in gcSmiIdtr
176 Status
= InitializeCpuExceptionHandlers (NULL
);
177 ASSERT_EFI_ERROR (Status
);
179 // Restore DXE IDT table and CPU interrupt
181 AsmWriteIdtr ((IA32_DESCRIPTOR
*)&DxeIdtr
);
182 SetInterruptState (InterruptState
);
186 Search module name by input IP address and output it.
188 @param CallerIpAddress Caller instruction pointer.
193 IN UINTN CallerIpAddress
202 Pe32Data
= PeCoffSearchImageBase (CallerIpAddress
);
204 DEBUG ((DEBUG_ERROR
, "It is invoked from the instruction before IP(0x%p)", (VOID
*)CallerIpAddress
));
205 PdbPointer
= PeCoffLoaderGetPdbPointer ((VOID
*)Pe32Data
);
206 if (PdbPointer
!= NULL
) {
207 DEBUG ((DEBUG_ERROR
, " in module (%a)\n", PdbPointer
));
213 Read information from the CPU save state.
215 @param This EFI_SMM_CPU_PROTOCOL instance
216 @param Width The number of bytes to read from the CPU save state.
217 @param Register Specifies the CPU register to read form the save state.
218 @param CpuIndex Specifies the zero-based index of the CPU save state.
219 @param Buffer Upon return, this holds the CPU register value read from the save state.
221 @retval EFI_SUCCESS The register was read from Save State
222 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
223 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
229 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
231 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
239 // Retrieve pointer to the specified CPU's SMM Save State buffer
241 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
242 return EFI_INVALID_PARAMETER
;
246 // The SpeculationBarrier() call here is to ensure the above check for the
247 // CpuIndex has been completed before the execution of subsequent codes.
249 SpeculationBarrier ();
252 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
254 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
256 // The pseudo-register only supports the 64-bit size specified by Width.
258 if (Width
!= sizeof (UINT64
)) {
259 return EFI_INVALID_PARAMETER
;
263 // If the processor is in SMM at the time the SMI occurred,
264 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
265 // Otherwise, EFI_NOT_FOUND is returned.
267 if (*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
)) {
268 *(UINT64
*)Buffer
= gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
;
271 return EFI_NOT_FOUND
;
275 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
276 return EFI_INVALID_PARAMETER
;
279 Status
= SmmCpuFeaturesReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
280 if (Status
== EFI_UNSUPPORTED
) {
281 Status
= ReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
288 Write data to the CPU save state.
290 @param This EFI_SMM_CPU_PROTOCOL instance
291 @param Width The number of bytes to read from the CPU save state.
292 @param Register Specifies the CPU register to write to the save state.
293 @param CpuIndex Specifies the zero-based index of the CPU save state
294 @param Buffer Upon entry, this holds the new CPU register value.
296 @retval EFI_SUCCESS The register was written from Save State
297 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
298 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
304 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
306 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
308 IN CONST VOID
*Buffer
314 // Retrieve pointer to the specified CPU's SMM Save State buffer
316 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
317 return EFI_INVALID_PARAMETER
;
321 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
323 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
327 if (!mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) {
328 return EFI_INVALID_PARAMETER
;
331 Status
= SmmCpuFeaturesWriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
332 if (Status
== EFI_UNSUPPORTED
) {
333 Status
= WriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
340 C function for SMI handler. To change all processor's SMMBase Register.
354 // Update SMM IDT entries' code segment and load IDT
356 AsmWriteIdtr (&gcSmiIdtr
);
357 ApicId
= GetApicId ();
359 IsBsp
= (BOOLEAN
)(mBspApicId
== ApicId
);
361 ASSERT (mNumberOfCpus
<= mMaxNumberOfCpus
);
363 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
364 if (ApicId
== (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
366 // Initialize SMM specific features on the currently executing CPU
368 SmmCpuFeaturesInitializeProcessor (
371 gSmmCpuPrivate
->ProcessorInfo
,
377 // Check XD and BTS features on each processor on normal boot
379 CheckFeatureSupported ();
382 // BSP rebase is already done above.
383 // Initialize private data during S3 resume
385 InitializeMpSyncData ();
388 if (!mSmmRelocated
) {
390 // Hook return after RSM to set SMM re-based flag
392 SemaphoreHook (Index
, &mRebased
[Index
]);
403 Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
407 ExecuteFirstSmiInit (
413 if (mSmmInitialized
== NULL
) {
414 mSmmInitialized
= (BOOLEAN
*)AllocatePool (sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
417 ASSERT (mSmmInitialized
!= NULL
);
418 if (mSmmInitialized
== NULL
) {
423 // Reset the mSmmInitialized to false.
425 ZeroMem ((VOID
*)mSmmInitialized
, sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
428 // Get the BSP ApicId.
430 mBspApicId
= GetApicId ();
433 // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
435 SendSmiIpi (mBspApicId
);
436 SendSmiIpiAllExcludingSelf ();
439 // Wait for all processors to finish its 1st SMI
441 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
442 while (!(BOOLEAN
)mSmmInitialized
[Index
]) {
448 Relocate SmmBases for each processor.
450 Execute on first boot and all S3 resumes
459 UINT8 BakBuf
[BACK_BUF_SIZE
];
460 SMRAM_SAVE_STATE_MAP BakBuf2
;
461 SMRAM_SAVE_STATE_MAP
*CpuStatePtr
;
467 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
469 ASSERT (sizeof (BakBuf
) >= gcSmmInitSize
);
472 // Patch ASM code template with current CR0, CR3, and CR4 values
474 mSmmCr0
= (UINT32
)AsmReadCr0 ();
475 PatchInstructionX86 (gPatchSmmCr0
, mSmmCr0
, 4);
476 PatchInstructionX86 (gPatchSmmCr3
, AsmReadCr3 (), 4);
477 mSmmCr4
= (UINT32
)AsmReadCr4 ();
478 PatchInstructionX86 (gPatchSmmCr4
, mSmmCr4
& (~CR4_CET_ENABLE
), 4);
481 // Patch GDTR for SMM base relocation
483 gcSmiInitGdtr
.Base
= gcSmiGdtr
.Base
;
484 gcSmiInitGdtr
.Limit
= gcSmiGdtr
.Limit
;
486 U8Ptr
= (UINT8
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMM_HANDLER_OFFSET
);
487 CpuStatePtr
= (SMRAM_SAVE_STATE_MAP
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMRAM_SAVE_STATE_MAP_OFFSET
);
490 // Backup original contents at address 0x38000
492 CopyMem (BakBuf
, U8Ptr
, sizeof (BakBuf
));
493 CopyMem (&BakBuf2
, CpuStatePtr
, sizeof (BakBuf2
));
496 // Load image for relocation
498 CopyMem (U8Ptr
, gcSmmInitTemplate
, gcSmmInitSize
);
501 // Retrieve the local APIC ID of current processor
503 mBspApicId
= GetApicId ();
506 // Relocate SM bases for all APs
507 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
509 BspIndex
= (UINTN
)-1;
510 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
511 mRebased
[Index
] = FALSE
;
512 if (mBspApicId
!= (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
513 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
515 // Wait for this AP to finish its 1st SMI
517 while (!mRebased
[Index
]) {
521 // BSP will be Relocated later
528 // Relocate BSP's SMM base
530 ASSERT (BspIndex
!= (UINTN
)-1);
531 SendSmiIpi (mBspApicId
);
533 // Wait for the BSP to finish its 1st SMI
535 while (!mRebased
[BspIndex
]) {
539 // Restore contents at address 0x38000
541 CopyMem (CpuStatePtr
, &BakBuf2
, sizeof (BakBuf2
));
542 CopyMem (U8Ptr
, BakBuf
, sizeof (BakBuf
));
546 SMM Ready To Lock event notification handler.
548 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
549 perform additional lock actions that must be performed from SMM on the next SMI.
551 @param[in] Protocol Points to the protocol's unique identifier.
552 @param[in] Interface Points to the interface instance.
553 @param[in] Handle The handle on which the interface was installed.
555 @retval EFI_SUCCESS Notification handler runs successfully.
559 SmmReadyToLockEventNotify (
560 IN CONST EFI_GUID
*Protocol
,
568 // Cache a copy of UEFI memory map before we start profiling feature.
573 // Set SMM ready to lock flag and return
575 mSmmReadyToLock
= TRUE
;
580 The module Entry Point of the CPU SMM driver.
582 @param ImageHandle The firmware allocated handle for the EFI image.
583 @param SystemTable A pointer to the EFI System Table.
585 @retval EFI_SUCCESS The entry point is executed successfully.
586 @retval Other Some error occurs when executing this entry point.
592 IN EFI_HANDLE ImageHandle
,
593 IN EFI_SYSTEM_TABLE
*SystemTable
597 EFI_MP_SERVICES_PROTOCOL
*MpServices
;
598 UINTN NumberOfEnabledProcessors
;
614 EFI_HOB_GUID_TYPE
*GuidHob
;
615 SMM_BASE_HOB_DATA
*SmmBaseHobData
;
618 SmmBaseHobData
= NULL
;
621 // Initialize address fixup
623 PiSmmCpuSmmInitFixupAddress ();
624 PiSmmCpuSmiEntryFixupAddress ();
627 // Initialize Debug Agent to support source level debug in SMM code
629 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM
, NULL
, NULL
);
632 // Report the start of CPU SMM initialization.
636 EFI_COMPUTING_UNIT_HOST_PROCESSOR
| EFI_CU_HP_PC_SMM_INIT
640 // Find out SMRR Base and SMRR Size
642 FindSmramInfo (&mCpuHotPlugData
.SmrrBase
, &mCpuHotPlugData
.SmrrSize
);
645 // Get MP Services Protocol
647 Status
= SystemTable
->BootServices
->LocateProtocol (&gEfiMpServiceProtocolGuid
, NULL
, (VOID
**)&MpServices
);
648 ASSERT_EFI_ERROR (Status
);
651 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
653 Status
= MpServices
->GetNumberOfProcessors (MpServices
, &mNumberOfCpus
, &NumberOfEnabledProcessors
);
654 ASSERT_EFI_ERROR (Status
);
655 ASSERT (mNumberOfCpus
<= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
));
658 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
659 // A constant BSP index makes no sense because it may be hot removed.
662 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
663 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection
));
669 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
671 mSmmCodeAccessCheckEnable
= PcdGetBool (PcdCpuSmmCodeAccessCheckEnable
);
672 DEBUG ((DEBUG_INFO
, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable
));
675 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
676 // Make sure AddressEncMask is contained to smallest supported address field.
678 mAddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
679 DEBUG ((DEBUG_INFO
, "mAddressEncMask = 0x%lx\n", mAddressEncMask
));
682 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
684 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
685 mMaxNumberOfCpus
= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
);
687 mMaxNumberOfCpus
= mNumberOfCpus
;
690 gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
= mMaxNumberOfCpus
;
693 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
694 // allocated buffer. The minimum size of this buffer for a uniprocessor system
695 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
696 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
697 // then the SMI entry point and the CPU save state areas can be tiles to minimize
698 // the total amount SMRAM required for all the CPUs. The tile size can be computed
699 // by adding the // CPU save state size, any extra CPU specific context, and
700 // the size of code that must be placed at the SMI entry point to transfer
701 // control to a C function in the native SMM execution mode. This size is
702 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
703 // The total amount of memory required is the maximum number of CPUs that
704 // platform supports times the tile size. The picture below shows the tiling,
705 // where m is the number of tiles that fit in 32KB.
707 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
708 // | CPU m+1 Save State |
709 // +-----------------------------+
710 // | CPU m+1 Extra Data |
711 // +-----------------------------+
713 // +-----------------------------+
714 // | CPU 2m SMI Entry |
715 // +#############################+ <-- Base of allocated buffer + 64 KB
716 // | CPU m-1 Save State |
717 // +-----------------------------+
718 // | CPU m-1 Extra Data |
719 // +-----------------------------+
721 // +-----------------------------+
722 // | CPU 2m-1 SMI Entry |
723 // +=============================+ <-- 2^n offset from Base of allocated buffer
724 // | . . . . . . . . . . . . |
725 // +=============================+ <-- 2^n offset from Base of allocated buffer
726 // | CPU 2 Save State |
727 // +-----------------------------+
728 // | CPU 2 Extra Data |
729 // +-----------------------------+
731 // +-----------------------------+
732 // | CPU m+1 SMI Entry |
733 // +=============================+ <-- Base of allocated buffer + 32 KB
734 // | CPU 1 Save State |
735 // +-----------------------------+
736 // | CPU 1 Extra Data |
737 // +-----------------------------+
739 // +-----------------------------+
740 // | CPU m SMI Entry |
741 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
742 // | CPU 0 Save State |
743 // +-----------------------------+
744 // | CPU 0 Extra Data |
745 // +-----------------------------+
747 // +-----------------------------+
748 // | CPU m-1 SMI Entry |
749 // +=============================+ <-- 2^n offset from Base of allocated buffer
750 // | . . . . . . . . . . . . |
751 // +=============================+ <-- 2^n offset from Base of allocated buffer
753 // +-----------------------------+
754 // | CPU 1 SMI Entry |
755 // +=============================+ <-- 2^n offset from Base of allocated buffer
757 // +-----------------------------+
758 // | CPU 0 SMI Entry |
759 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
763 // Retrieve CPU Family
765 AsmCpuid (CPUID_VERSION_INFO
, &RegEax
, NULL
, NULL
, NULL
);
766 FamilyId
= (RegEax
>> 8) & 0xf;
767 ModelId
= (RegEax
>> 4) & 0xf;
768 if ((FamilyId
== 0x06) || (FamilyId
== 0x0f)) {
769 ModelId
= ModelId
| ((RegEax
>> 12) & 0xf0);
773 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
774 if (RegEax
>= CPUID_EXTENDED_CPU_SIG
) {
775 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
779 // Determine the mode of the CPU at the time an SMI occurs
780 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
781 // Volume 3C, Section 34.4.1.1
783 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
;
784 if ((RegEdx
& BIT29
) != 0) {
785 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
788 if (FamilyId
== 0x06) {
789 if ((ModelId
== 0x17) || (ModelId
== 0x0f) || (ModelId
== 0x1c)) {
790 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
794 DEBUG ((DEBUG_INFO
, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask
)));
795 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) {
796 AsmCpuid (CPUID_SIGNATURE
, &RegEax
, NULL
, NULL
, NULL
);
797 if (RegEax
>= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
) {
798 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
, NULL
, NULL
, &RegEcx
, &RegEdx
);
799 DEBUG ((DEBUG_INFO
, "CPUID[7/0] ECX - 0x%08x\n", RegEcx
));
800 DEBUG ((DEBUG_INFO
, " CET_SS - 0x%08x\n", RegEcx
& CPUID_CET_SS
));
801 DEBUG ((DEBUG_INFO
, " CET_IBT - 0x%08x\n", RegEdx
& CPUID_CET_IBT
));
802 if ((RegEcx
& CPUID_CET_SS
) == 0) {
803 mCetSupported
= FALSE
;
804 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
808 AsmCpuidEx (CPUID_EXTENDED_STATE
, CPUID_EXTENDED_STATE_SUB_LEAF
, NULL
, &RegEbx
, &RegEcx
, NULL
);
809 DEBUG ((DEBUG_INFO
, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx
, RegEcx
));
810 AsmCpuidEx (CPUID_EXTENDED_STATE
, 11, &RegEax
, NULL
, &RegEcx
, NULL
);
811 DEBUG ((DEBUG_INFO
, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
812 AsmCpuidEx (CPUID_EXTENDED_STATE
, 12, &RegEax
, NULL
, &RegEcx
, NULL
);
813 DEBUG ((DEBUG_INFO
, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
816 mCetSupported
= FALSE
;
817 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
820 mCetSupported
= FALSE
;
821 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
825 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
826 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
827 // This size is rounded up to nearest power of 2.
829 TileCodeSize
= GetSmiHandlerSize ();
830 TileCodeSize
= ALIGN_VALUE (TileCodeSize
, SIZE_4KB
);
831 TileDataSize
= (SMRAM_SAVE_STATE_MAP_OFFSET
- SMM_PSD_OFFSET
) + sizeof (SMRAM_SAVE_STATE_MAP
);
832 TileDataSize
= ALIGN_VALUE (TileDataSize
, SIZE_4KB
);
833 TileSize
= TileDataSize
+ TileCodeSize
- 1;
834 TileSize
= 2 * GetPowerOfTwo32 ((UINT32
)TileSize
);
835 DEBUG ((DEBUG_INFO
, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize
, TileCodeSize
, TileDataSize
));
838 // If the TileSize is larger than space available for the SMI Handler of
839 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
840 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
841 // the SMI Handler size must be reduced or the size of the extra CPU specific
842 // context must be reduced.
844 ASSERT (TileSize
<= (SMRAM_SAVE_STATE_MAP_OFFSET
+ sizeof (SMRAM_SAVE_STATE_MAP
) - SMM_HANDLER_OFFSET
));
847 // Retrive the allocated SmmBase from gSmmBaseHobGuid. If found,
848 // means the SmBase relocation has been done.
850 GuidHob
= GetFirstGuidHob (&gSmmBaseHobGuid
);
851 if (GuidHob
!= NULL
) {
853 // Check whether the Required TileSize is enough.
855 if (TileSize
> SIZE_8KB
) {
856 DEBUG ((DEBUG_ERROR
, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize
, SIZE_8KB
));
858 return RETURN_BUFFER_TOO_SMALL
;
861 SmmBaseHobData
= GET_GUID_HOB_DATA (GuidHob
);
864 // Assume single instance of HOB produced, expect the HOB.NumberOfProcessors equals to the mMaxNumberOfCpus.
866 ASSERT (SmmBaseHobData
->NumberOfProcessors
== (UINT32
)mMaxNumberOfCpus
&& SmmBaseHobData
->ProcessorIndex
== 0);
867 mSmmRelocated
= TRUE
;
870 // When the HOB doesn't exist, allocate new SMBASE itself.
872 DEBUG ((DEBUG_INFO
, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));
874 // Allocate buffer for all of the tiles.
876 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
877 // Volume 3C, Section 34.11 SMBASE Relocation
878 // For Pentium and Intel486 processors, the SMBASE values must be
879 // aligned on a 32-KByte boundary or the processor will enter shutdown
880 // state during the execution of a RSM instruction.
882 // Intel486 processors: FamilyId is 4
883 // Pentium processors : FamilyId is 5
885 BufferPages
= EFI_SIZE_TO_PAGES (SIZE_32KB
+ TileSize
* (mMaxNumberOfCpus
- 1));
886 if ((FamilyId
== 4) || (FamilyId
== 5)) {
887 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_32KB
);
889 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_4KB
);
892 ASSERT (Buffer
!= NULL
);
893 DEBUG ((DEBUG_INFO
, "New Allcoated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer
, EFI_PAGES_TO_SIZE (BufferPages
)));
897 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
899 gSmmCpuPrivate
->ProcessorInfo
= (EFI_PROCESSOR_INFORMATION
*)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION
) * mMaxNumberOfCpus
);
900 ASSERT (gSmmCpuPrivate
->ProcessorInfo
!= NULL
);
902 gSmmCpuPrivate
->Operation
= (SMM_CPU_OPERATION
*)AllocatePool (sizeof (SMM_CPU_OPERATION
) * mMaxNumberOfCpus
);
903 ASSERT (gSmmCpuPrivate
->Operation
!= NULL
);
905 gSmmCpuPrivate
->CpuSaveStateSize
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
906 ASSERT (gSmmCpuPrivate
->CpuSaveStateSize
!= NULL
);
908 gSmmCpuPrivate
->CpuSaveState
= (VOID
**)AllocatePool (sizeof (VOID
*) * mMaxNumberOfCpus
);
909 ASSERT (gSmmCpuPrivate
->CpuSaveState
!= NULL
);
911 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveStateSize
= gSmmCpuPrivate
->CpuSaveStateSize
;
912 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveState
= gSmmCpuPrivate
->CpuSaveState
;
915 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
917 mCpuHotPlugData
.ApicId
= (UINT64
*)AllocatePool (sizeof (UINT64
) * mMaxNumberOfCpus
);
918 ASSERT (mCpuHotPlugData
.ApicId
!= NULL
);
919 mCpuHotPlugData
.SmBase
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
920 ASSERT (mCpuHotPlugData
.SmBase
!= NULL
);
921 mCpuHotPlugData
.ArrayLength
= (UINT32
)mMaxNumberOfCpus
;
924 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
925 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
926 // size for each CPU in the platform
928 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
929 mCpuHotPlugData
.SmBase
[Index
] = mSmmRelocated
? (UINTN
)SmmBaseHobData
->SmBase
[Index
] : (UINTN
)Buffer
+ Index
* TileSize
- SMM_HANDLER_OFFSET
;
931 gSmmCpuPrivate
->CpuSaveStateSize
[Index
] = sizeof (SMRAM_SAVE_STATE_MAP
);
932 gSmmCpuPrivate
->CpuSaveState
[Index
] = (VOID
*)(mCpuHotPlugData
.SmBase
[Index
] + SMRAM_SAVE_STATE_MAP_OFFSET
);
933 gSmmCpuPrivate
->Operation
[Index
] = SmmCpuNone
;
935 if (Index
< mNumberOfCpus
) {
936 Status
= MpServices
->GetProcessorInfo (MpServices
, Index
, &gSmmCpuPrivate
->ProcessorInfo
[Index
]);
937 ASSERT_EFI_ERROR (Status
);
938 mCpuHotPlugData
.ApicId
[Index
] = gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
;
942 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
944 (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
,
945 mCpuHotPlugData
.SmBase
[Index
],
946 gSmmCpuPrivate
->CpuSaveState
[Index
],
947 gSmmCpuPrivate
->CpuSaveStateSize
[Index
]
950 gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
= INVALID_APIC_ID
;
951 mCpuHotPlugData
.ApicId
[Index
] = INVALID_APIC_ID
;
956 // Allocate SMI stacks for all processors.
958 mSmmStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize
)));
959 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
961 // SMM Stack Guard Enabled
962 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.
964 // +--------------------------------------------------+-----+--------------------------------------------------+
965 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
966 // +--------------------------------------------------+-----+--------------------------------------------------+
967 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
968 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
970 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
972 mSmmStackSize
+= EFI_PAGES_TO_SIZE (2);
975 mSmmShadowStackSize
= 0;
976 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
977 mSmmShadowStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize
)));
979 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
981 // SMM Stack Guard Enabled
982 // Append Shadow Stack after normal stack
983 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
986 // +--------------------------------------------------+---------------------------------------------------------------+
987 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
988 // +--------------------------------------------------+---------------------------------------------------------------+
989 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
990 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
992 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
994 mSmmShadowStackSize
+= EFI_PAGES_TO_SIZE (2);
997 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
998 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
999 // 1 more pages is allocated for each processor, it is known good stack.
1003 // +-------------------------------------+--------------------------------------------------+
1004 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
1005 // +-------------------------------------+--------------------------------------------------+
1006 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
1007 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
1009 // |<-------------------------------- Processor N ----------------------------------------->|
1011 mSmmShadowStackSize
+= EFI_PAGES_TO_SIZE (1);
1012 mSmmStackSize
+= EFI_PAGES_TO_SIZE (1);
1016 Stacks
= (UINT8
*)AllocatePages (gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (EFI_SIZE_TO_PAGES (mSmmStackSize
+ mSmmShadowStackSize
)));
1017 ASSERT (Stacks
!= NULL
);
1018 mSmmStackArrayBase
= (UINTN
)Stacks
;
1019 mSmmStackArrayEnd
= mSmmStackArrayBase
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (mSmmStackSize
+ mSmmShadowStackSize
) - 1;
1021 DEBUG ((DEBUG_INFO
, "Stacks - 0x%x\n", Stacks
));
1022 DEBUG ((DEBUG_INFO
, "mSmmStackSize - 0x%x\n", mSmmStackSize
));
1023 DEBUG ((DEBUG_INFO
, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard
)));
1024 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
1025 DEBUG ((DEBUG_INFO
, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize
));
1029 // Set SMI stack for SMM base relocation
1031 PatchInstructionX86 (
1033 (UINTN
)(Stacks
+ mSmmStackSize
- sizeof (UINTN
)),
1040 InitializeSmmIdt ();
1043 // Check whether Smm Relocation is done or not.
1044 // If not, will do the SmmBases Relocation here!!!
1046 if (!mSmmRelocated
) {
1048 // Relocate SMM Base addresses to the ones allocated from SMRAM
1050 mRebased
= (BOOLEAN
*)AllocateZeroPool (sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
1051 ASSERT (mRebased
!= NULL
);
1052 SmmRelocateBases ();
1055 // Call hook for BSP to perform extra actions in normal mode after all
1056 // SMM base addresses have been relocated on all CPUs
1058 SmmCpuFeaturesSmmRelocationComplete ();
1061 DEBUG ((DEBUG_INFO
, "mXdSupported - 0x%x\n", mXdSupported
));
1064 // SMM Time initialization
1066 InitializeSmmTimer ();
1069 // Initialize MP globals
1071 Cr3
= InitializeMpServiceData (Stacks
, mSmmStackSize
, mSmmShadowStackSize
);
1073 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
1074 for (Index
= 0; Index
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; Index
++) {
1077 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
1080 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
1083 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ EFI_PAGES_TO_SIZE (1) + (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
1084 EFI_PAGES_TO_SIZE (1)
1091 // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
1092 // Those MSRs & CSRs must be configured before normal SMI sources happen.
1093 // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
1095 if (mSmmRelocated
) {
1096 ExecuteFirstSmiInit ();
1099 // Call hook for BSP to perform extra actions in normal mode after all
1100 // SMM base addresses have been relocated on all CPUs
1102 SmmCpuFeaturesSmmRelocationComplete ();
1106 // Fill in SMM Reserved Regions
1108 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedStart
= 0;
1109 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedSize
= 0;
1112 // Install the SMM Configuration Protocol onto a new handle on the handle database.
1113 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1114 // to an SMRAM address will be present in the handle database
1116 Status
= SystemTable
->BootServices
->InstallMultipleProtocolInterfaces (
1117 &gSmmCpuPrivate
->SmmCpuHandle
,
1118 &gEfiSmmConfigurationProtocolGuid
,
1119 &gSmmCpuPrivate
->SmmConfiguration
,
1122 ASSERT_EFI_ERROR (Status
);
1125 // Install the SMM CPU Protocol into SMM protocol database
1127 Status
= gSmst
->SmmInstallProtocolInterface (
1129 &gEfiSmmCpuProtocolGuid
,
1130 EFI_NATIVE_INTERFACE
,
1133 ASSERT_EFI_ERROR (Status
);
1136 // Install the SMM Memory Attribute Protocol into SMM protocol database
1138 Status
= gSmst
->SmmInstallProtocolInterface (
1140 &gEdkiiSmmMemoryAttributeProtocolGuid
,
1141 EFI_NATIVE_INTERFACE
,
1142 &mSmmMemoryAttribute
1144 ASSERT_EFI_ERROR (Status
);
1147 // Initialize global buffer for MM MP.
1149 InitializeDataForMmMp ();
1152 // Initialize Package First Thread Index Info.
1154 InitPackageFirstThreadIndexInfo ();
1157 // Install the SMM Mp Protocol into SMM protocol database
1159 Status
= gSmst
->SmmInstallProtocolInterface (
1161 &gEfiMmMpProtocolGuid
,
1162 EFI_NATIVE_INTERFACE
,
1165 ASSERT_EFI_ERROR (Status
);
1168 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1170 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1171 Status
= PcdSet64S (PcdCpuHotPlugDataAddress
, (UINT64
)(UINTN
)&mCpuHotPlugData
);
1172 ASSERT_EFI_ERROR (Status
);
1176 // Initialize SMM CPU Services Support
1178 Status
= InitializeSmmCpuServices (mSmmCpuHandle
);
1179 ASSERT_EFI_ERROR (Status
);
1182 // register SMM Ready To Lock Protocol notification
1184 Status
= gSmst
->SmmRegisterProtocolNotify (
1185 &gEfiSmmReadyToLockProtocolGuid
,
1186 SmmReadyToLockEventNotify
,
1189 ASSERT_EFI_ERROR (Status
);
1192 // Initialize SMM Profile feature
1194 InitSmmProfile (Cr3
);
1196 GetAcpiS3EnableFlag ();
1197 InitSmmS3ResumeState (Cr3
);
1199 DEBUG ((DEBUG_INFO
, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1206 Find out SMRAM information including SMRR base and SMRR size.
1208 @param SmrrBase SMRR base
1209 @param SmrrSize SMRR size
1214 OUT UINT32
*SmrrBase
,
1215 OUT UINT32
*SmrrSize
1220 EFI_SMM_ACCESS2_PROTOCOL
*SmmAccess
;
1221 EFI_SMRAM_DESCRIPTOR
*CurrentSmramRange
;
1227 // Get SMM Access Protocol
1229 Status
= gBS
->LocateProtocol (&gEfiSmmAccess2ProtocolGuid
, NULL
, (VOID
**)&SmmAccess
);
1230 ASSERT_EFI_ERROR (Status
);
1233 // Get SMRAM information
1236 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, NULL
);
1237 ASSERT (Status
== EFI_BUFFER_TOO_SMALL
);
1239 mSmmCpuSmramRanges
= (EFI_SMRAM_DESCRIPTOR
*)AllocatePool (Size
);
1240 ASSERT (mSmmCpuSmramRanges
!= NULL
);
1242 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, mSmmCpuSmramRanges
);
1243 ASSERT_EFI_ERROR (Status
);
1245 mSmmCpuSmramRangeCount
= Size
/ sizeof (EFI_SMRAM_DESCRIPTOR
);
1248 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1250 CurrentSmramRange
= NULL
;
1251 for (Index
= 0, MaxSize
= SIZE_256KB
- EFI_PAGE_SIZE
; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1253 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1255 if ((mSmmCpuSmramRanges
[Index
].RegionState
& (EFI_ALLOCATED
| EFI_NEEDS_TESTING
| EFI_NEEDS_ECC_INITIALIZATION
)) != 0) {
1259 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= BASE_1MB
) {
1260 if ((mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) <= SMRR_MAX_ADDRESS
) {
1261 if (mSmmCpuSmramRanges
[Index
].PhysicalSize
>= MaxSize
) {
1262 MaxSize
= mSmmCpuSmramRanges
[Index
].PhysicalSize
;
1263 CurrentSmramRange
= &mSmmCpuSmramRanges
[Index
];
1269 ASSERT (CurrentSmramRange
!= NULL
);
1271 *SmrrBase
= (UINT32
)CurrentSmramRange
->CpuStart
;
1272 *SmrrSize
= (UINT32
)CurrentSmramRange
->PhysicalSize
;
1276 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1277 if ((mSmmCpuSmramRanges
[Index
].CpuStart
< *SmrrBase
) &&
1278 (*SmrrBase
== (mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
)))
1280 *SmrrBase
= (UINT32
)mSmmCpuSmramRanges
[Index
].CpuStart
;
1281 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1283 } else if (((*SmrrBase
+ *SmrrSize
) == mSmmCpuSmramRanges
[Index
].CpuStart
) && (mSmmCpuSmramRanges
[Index
].PhysicalSize
> 0)) {
1284 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1290 DEBUG ((DEBUG_INFO
, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase
, *SmrrSize
));
1294 Configure SMM Code Access Check feature on an AP.
1295 SMM Feature Control MSR will be locked after configuration.
1297 @param[in,out] Buffer Pointer to private data buffer.
1301 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1306 UINT64 SmmFeatureControlMsr
;
1307 UINT64 NewSmmFeatureControlMsr
;
1310 // Retrieve the CPU Index from the context passed in
1312 CpuIndex
= *(UINTN
*)Buffer
;
1315 // Get the current SMM Feature Control MSR value
1317 SmmFeatureControlMsr
= SmmCpuFeaturesGetSmmRegister (CpuIndex
, SmmRegFeatureControl
);
1320 // Compute the new SMM Feature Control MSR value
1322 NewSmmFeatureControlMsr
= SmmFeatureControlMsr
;
1323 if (mSmmCodeAccessCheckEnable
) {
1324 NewSmmFeatureControlMsr
|= SMM_CODE_CHK_EN_BIT
;
1325 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock
)) {
1326 NewSmmFeatureControlMsr
|= SMM_FEATURE_CONTROL_LOCK_BIT
;
1331 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1333 if (NewSmmFeatureControlMsr
!= SmmFeatureControlMsr
) {
1334 SmmCpuFeaturesSetSmmRegister (CpuIndex
, SmmRegFeatureControl
, NewSmmFeatureControlMsr
);
1338 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1340 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1344 Configure SMM Code Access Check feature for all processors.
1345 SMM Feature Control MSR will be locked after configuration.
1348 ConfigSmmCodeAccessCheck (
1356 // Check to see if the Feature Control MSR is supported on this CPU
1358 Index
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
1359 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index
, SmmRegFeatureControl
)) {
1360 mSmmCodeAccessCheckEnable
= FALSE
;
1365 // Check to see if the CPU supports the SMM Code Access Check feature
1366 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1368 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP
) & SMM_CODE_ACCESS_CHK_BIT
) == 0) {
1369 mSmmCodeAccessCheckEnable
= FALSE
;
1374 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1376 InitializeSpinLock (mConfigSmmCodeAccessCheckLock
);
1379 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1380 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1382 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1385 // Enable SMM Code Access Check feature on the BSP.
1387 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index
);
1390 // Enable SMM Code Access Check feature for the APs.
1392 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1393 if (Index
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1394 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== INVALID_APIC_ID
) {
1396 // If this processor does not exist
1402 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1403 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1405 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1408 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1410 Status
= gSmst
->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor
, Index
, &Index
);
1411 ASSERT_EFI_ERROR (Status
);
1414 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1416 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock
)) {
1421 // Release the Config SMM Code Access Check spin lock.
1423 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1429 Allocate pages for code.
1431 @param[in] Pages Number of pages to be allocated.
1433 @return Allocated memory.
1441 EFI_PHYSICAL_ADDRESS Memory
;
1447 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1448 if (EFI_ERROR (Status
)) {
1452 return (VOID
*)(UINTN
)Memory
;
1456 Allocate aligned pages for code.
1458 @param[in] Pages Number of pages to be allocated.
1459 @param[in] Alignment The requested alignment of the allocation.
1460 Must be a power of two.
1461 If Alignment is zero, then byte alignment is used.
1463 @return Allocated memory.
1466 AllocateAlignedCodePages (
1472 EFI_PHYSICAL_ADDRESS Memory
;
1473 UINTN AlignedMemory
;
1474 UINTN AlignmentMask
;
1475 UINTN UnalignedPages
;
1479 // Alignment must be a power of two or zero.
1481 ASSERT ((Alignment
& (Alignment
- 1)) == 0);
1487 if (Alignment
> EFI_PAGE_SIZE
) {
1489 // Calculate the total number of pages since alignment is larger than page size.
1491 AlignmentMask
= Alignment
- 1;
1492 RealPages
= Pages
+ EFI_SIZE_TO_PAGES (Alignment
);
1494 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1496 ASSERT (RealPages
> Pages
);
1498 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, RealPages
, &Memory
);
1499 if (EFI_ERROR (Status
)) {
1503 AlignedMemory
= ((UINTN
)Memory
+ AlignmentMask
) & ~AlignmentMask
;
1504 UnalignedPages
= EFI_SIZE_TO_PAGES (AlignedMemory
- (UINTN
)Memory
);
1505 if (UnalignedPages
> 0) {
1507 // Free first unaligned page(s).
1509 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1510 ASSERT_EFI_ERROR (Status
);
1513 Memory
= AlignedMemory
+ EFI_PAGES_TO_SIZE (Pages
);
1514 UnalignedPages
= RealPages
- Pages
- UnalignedPages
;
1515 if (UnalignedPages
> 0) {
1517 // Free last unaligned page(s).
1519 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1520 ASSERT_EFI_ERROR (Status
);
1524 // Do not over-allocate pages in this case.
1526 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1527 if (EFI_ERROR (Status
)) {
1531 AlignedMemory
= (UINTN
)Memory
;
1534 return (VOID
*)AlignedMemory
;
1538 Perform the remaining tasks.
1542 PerformRemainingTasks (
1546 if (mSmmReadyToLock
) {
1548 // Start SMM Profile feature
1550 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1555 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1560 // Mark critical region to be read-only in page table
1562 SetMemMapAttributes ();
1564 if (IsRestrictedMemoryAccess ()) {
1566 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1568 SetUefiMemMapAttributes ();
1571 // Set page table itself to be read-only
1573 SetPageTableAttributes ();
1577 // Configure SMM Code Access Check feature if available.
1579 ConfigSmmCodeAccessCheck ();
1581 SmmCpuFeaturesCompleteSmmReadyToLock ();
1584 // Clean SMM ready to lock flag
1586 mSmmReadyToLock
= FALSE
;
1591 Perform the pre tasks.
1599 RestoreSmmConfigurationInS3 ();