2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "PiSmmCpuDxeSmm.h"
14 // SMM CPU Private Data structure that contains SMM Configuration Protocol
15 // along with its supporting fields.
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData
= {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE
, // Signature
20 NULL
, // Pointer to ProcessorInfo array
21 NULL
, // Pointer to Operation array
22 NULL
, // Pointer to CpuSaveStateSize array
23 NULL
, // Pointer to CpuSaveState array
24 { {0} }, // SmmReservedSmramRegion
26 SmmStartupThisAp
, // SmmCoreEntryContext.SmmStartupThisAp
27 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
28 0, // SmmCoreEntryContext.NumberOfCpus
29 NULL
, // SmmCoreEntryContext.CpuSaveStateSize
30 NULL
// SmmCoreEntryContext.CpuSaveState
34 mSmmCpuPrivateData
.SmmReservedSmramRegion
, // SmmConfiguration.SmramReservedRegions
35 RegisterSmmEntry
// SmmConfiguration.RegisterSmmEntry
37 NULL
, // pointer to Ap Wrapper Func array
38 {NULL
, NULL
}, // List_Entry for Tokens.
41 CPU_HOT_PLUG_DATA mCpuHotPlugData
= {
42 CPU_HOT_PLUG_DATA_REVISION_1
, // Revision
43 0, // Array Length of SmBase and APIC ID
44 NULL
, // Pointer to APIC ID array
45 NULL
, // Pointer to SMBASE array
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM.
// Code in this module always dereferences the private data through this
// pointer rather than the structure directly.
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;
//
// SMM Relocation variables
//
// Per-CPU flags, allocated in PiCpuSmmEntry; each entry is set once the
// corresponding processor has finished relocating its SMBASE during its
// first SMI (see SemaphoreHook usage), and the relocation loop busy-waits
// on these flags.
volatile BOOLEAN  *mRebased;
// NOTE(review): mIsBsp is not referenced in this chunk — presumably marks
// whether the current processor is the BSP during relocation; confirm
// against the rest of the file.
volatile BOOLEAN  mIsBsp;
///
/// Handle for the SMM CPU Protocol; also passed to InitializeSmmCpuServices()
/// during module entry.
///
EFI_HANDLE  mSmmCpuHandle = NULL;
68 /// SMM CPU Protocol instance
70 EFI_SMM_CPU_PROTOCOL mSmmCpu
= {
76 /// SMM Memory Attribute Protocol instance
78 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute
= {
79 EdkiiSmmGetMemoryAttributes
,
80 EdkiiSmmSetMemoryAttributes
,
81 EdkiiSmmClearMemoryAttributes
//
// Table of externally registered interrupt handlers, indexed by exception
// vector number.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
//
// SMM stack information
//
UINTN    mSmmStackArrayBase;   // Base address of the per-CPU SMI stack buffer
UINTN    mSmmStackArrayEnd;    // Last valid address (inclusive) of the SMI stack buffer

UINTN    mSmmShadowStackSize;       // Per-CPU CET shadow stack size; 0 when CET is disabled/unsupported
BOOLEAN  mCetSupported = TRUE;      // Assume CET until CPUID CET_SS check proves otherwise

UINTN    mMaxNumberOfCpus = 1;      // Max supported processors (PcdCpuMaxLogicalProcessorNumber when hot plug enabled)
UINTN    mNumberOfCpus    = 1;      // Processors reported by MP Services at boot
//
// SMM ready to lock flag; set TRUE by SmmReadyToLockEventNotify() so that
// additional lock actions are performed from SMM on the next SMI.
//
BOOLEAN  mSmmReadyToLock = FALSE;
//
// Global used to cache PcdCpuSmmCodeAccessCheckEnable; read once at module
// entry so SMM code never needs to access the PCD database.
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;
//
// Global copy of PcdPteMemoryEncryptionAddressOrMask (masked to the
// supported address field at module entry); OR'ed into page table entries
// on platforms with memory encryption (e.g. AMD SEV).
//
UINT64  mAddressEncMask = 0;
//
// Spin lock used to serialize setting of the SMM Code Access Check feature
// (SMM Feature Control MSR) across processors.
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;
//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;      // SMRAM descriptors returned by EFI_SMM_ACCESS2_PROTOCOL.GetCapabilities()
UINTN                 mSmmCpuSmramRangeCount;   // Number of entries in mSmmCpuSmramRanges
// NOTE(review): mPhysicalAddressBits is not assigned in this chunk —
// presumably set elsewhere from CPUID leaf 0x80000008; confirm.
UINT8                 mPhysicalAddressBits;
128 // Control register contents saved for SMM S3 resume state initialization.
134 Initialize IDT to setup exception handlers for SMM.
143 BOOLEAN InterruptState
;
144 IA32_DESCRIPTOR DxeIdtr
;
147 // There are 32 (not 255) entries in it since only processor
148 // generated exceptions will be handled.
150 gcSmiIdtr
.Limit
= (sizeof(IA32_IDT_GATE_DESCRIPTOR
) * 32) - 1;
152 // Allocate page aligned IDT, because it might be set as read only.
154 gcSmiIdtr
.Base
= (UINTN
)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr
.Limit
+ 1));
155 ASSERT (gcSmiIdtr
.Base
!= 0);
156 ZeroMem ((VOID
*)gcSmiIdtr
.Base
, gcSmiIdtr
.Limit
+ 1);
159 // Disable Interrupt and save DXE IDT table
161 InterruptState
= SaveAndDisableInterrupts ();
162 AsmReadIdtr (&DxeIdtr
);
164 // Load SMM temporary IDT table
166 AsmWriteIdtr (&gcSmiIdtr
);
168 // Setup SMM default exception handlers, SMM IDT table
169 // will be updated and saved in gcSmiIdtr
171 Status
= InitializeCpuExceptionHandlers (NULL
);
172 ASSERT_EFI_ERROR (Status
);
174 // Restore DXE IDT table and CPU interrupt
176 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &DxeIdtr
);
177 SetInterruptState (InterruptState
);
181 Search module name by input IP address and output it.
183 @param CallerIpAddress Caller instruction pointer.
188 IN UINTN CallerIpAddress
197 Pe32Data
= PeCoffSearchImageBase (CallerIpAddress
);
199 DEBUG ((DEBUG_ERROR
, "It is invoked from the instruction before IP(0x%p)", (VOID
*) CallerIpAddress
));
200 PdbPointer
= PeCoffLoaderGetPdbPointer ((VOID
*) Pe32Data
);
201 if (PdbPointer
!= NULL
) {
202 DEBUG ((DEBUG_ERROR
, " in module (%a)\n", PdbPointer
));
208 Read information from the CPU save state.
210 @param This EFI_SMM_CPU_PROTOCOL instance
211 @param Width The number of bytes to read from the CPU save state.
212 @param Register Specifies the CPU register to read from the save state.
213 @param CpuIndex Specifies the zero-based index of the CPU save state.
214 @param Buffer Upon return, this holds the CPU register value read from the save state.
216 @retval EFI_SUCCESS The register was read from Save State
217 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
218 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
224 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
226 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
234 // Retrieve pointer to the specified CPU's SMM Save State buffer
236 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
237 return EFI_INVALID_PARAMETER
;
240 // The SpeculationBarrier() call here is to ensure the above check for the
241 // CpuIndex has been completed before the execution of subsequent codes.
243 SpeculationBarrier ();
246 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
248 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
250 // The pseudo-register only supports the 64-bit size specified by Width.
252 if (Width
!= sizeof (UINT64
)) {
253 return EFI_INVALID_PARAMETER
;
256 // If the processor is in SMM at the time the SMI occurred,
257 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
258 // Otherwise, EFI_NOT_FOUND is returned.
260 if (*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
)) {
261 *(UINT64
*)Buffer
= gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
;
264 return EFI_NOT_FOUND
;
268 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
269 return EFI_INVALID_PARAMETER
;
272 Status
= SmmCpuFeaturesReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
273 if (Status
== EFI_UNSUPPORTED
) {
274 Status
= ReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
280 Write data to the CPU save state.
282 @param This EFI_SMM_CPU_PROTOCOL instance
283 @param Width The number of bytes to read from the CPU save state.
284 @param Register Specifies the CPU register to write to the save state.
285 @param CpuIndex Specifies the zero-based index of the CPU save state
286 @param Buffer Upon entry, this holds the new CPU register value.
288 @retval EFI_SUCCESS The register was written to Save State
289 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
290 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
296 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
298 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
300 IN CONST VOID
*Buffer
306 // Retrieve pointer to the specified CPU's SMM Save State buffer
308 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
309 return EFI_INVALID_PARAMETER
;
313 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
315 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
319 if (!mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) {
320 return EFI_INVALID_PARAMETER
;
323 Status
= SmmCpuFeaturesWriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
324 if (Status
== EFI_UNSUPPORTED
) {
325 Status
= WriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
332 C function for SMI handler. To change all processor's SMMBase Register.
345 // Update SMM IDT entries' code segment and load IDT
347 AsmWriteIdtr (&gcSmiIdtr
);
348 ApicId
= GetApicId ();
350 ASSERT (mNumberOfCpus
<= mMaxNumberOfCpus
);
352 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
353 if (ApicId
== (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
355 // Initialize SMM specific features on the currently executing CPU
357 SmmCpuFeaturesInitializeProcessor (
360 gSmmCpuPrivate
->ProcessorInfo
,
366 // Check XD and BTS features on each processor on normal boot
368 CheckFeatureSupported ();
373 // BSP rebase is already done above.
374 // Initialize private data during S3 resume
376 InitializeMpSyncData ();
380 // Hook return after RSM to set SMM re-based flag
382 SemaphoreHook (Index
, &mRebased
[Index
]);
391 Relocate SmmBases for each processor.
393 Execute on first boot and all S3 resumes
402 UINT8 BakBuf
[BACK_BUF_SIZE
];
403 SMRAM_SAVE_STATE_MAP BakBuf2
;
404 SMRAM_SAVE_STATE_MAP
*CpuStatePtr
;
411 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
413 ASSERT (sizeof (BakBuf
) >= gcSmmInitSize
);
416 // Patch ASM code template with current CR0, CR3, and CR4 values
418 mSmmCr0
= (UINT32
)AsmReadCr0 ();
419 PatchInstructionX86 (gPatchSmmCr0
, mSmmCr0
, 4);
420 PatchInstructionX86 (gPatchSmmCr3
, AsmReadCr3 (), 4);
421 mSmmCr4
= (UINT32
)AsmReadCr4 ();
422 PatchInstructionX86 (gPatchSmmCr4
, mSmmCr4
& (~CR4_CET_ENABLE
), 4);
425 // Patch GDTR for SMM base relocation
427 gcSmiInitGdtr
.Base
= gcSmiGdtr
.Base
;
428 gcSmiInitGdtr
.Limit
= gcSmiGdtr
.Limit
;
430 U8Ptr
= (UINT8
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMM_HANDLER_OFFSET
);
431 CpuStatePtr
= (SMRAM_SAVE_STATE_MAP
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMRAM_SAVE_STATE_MAP_OFFSET
);
434 // Backup original contents at address 0x38000
436 CopyMem (BakBuf
, U8Ptr
, sizeof (BakBuf
));
437 CopyMem (&BakBuf2
, CpuStatePtr
, sizeof (BakBuf2
));
440 // Load image for relocation
442 CopyMem (U8Ptr
, gcSmmInitTemplate
, gcSmmInitSize
);
445 // Retrieve the local APIC ID of current processor
447 ApicId
= GetApicId ();
450 // Relocate SM bases for all APs
451 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
454 BspIndex
= (UINTN
)-1;
455 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
456 mRebased
[Index
] = FALSE
;
457 if (ApicId
!= (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
458 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
460 // Wait for this AP to finish its 1st SMI
462 while (!mRebased
[Index
]);
465 // BSP will be Relocated later
472 // Relocate BSP's SMM base
474 ASSERT (BspIndex
!= (UINTN
)-1);
478 // Wait for the BSP to finish its 1st SMI
480 while (!mRebased
[BspIndex
]);
483 // Restore contents at address 0x38000
485 CopyMem (CpuStatePtr
, &BakBuf2
, sizeof (BakBuf2
));
486 CopyMem (U8Ptr
, BakBuf
, sizeof (BakBuf
));
490 SMM Ready To Lock event notification handler.
492 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
493 perform additional lock actions that must be performed from SMM on the next SMI.
495 @param[in] Protocol Points to the protocol's unique identifier.
496 @param[in] Interface Points to the interface instance.
497 @param[in] Handle The handle on which the interface was installed.
499 @retval EFI_SUCCESS Notification handler runs successfully.
503 SmmReadyToLockEventNotify (
504 IN CONST EFI_GUID
*Protocol
,
512 // Cache a copy of UEFI memory map before we start profiling feature.
517 // Set SMM ready to lock flag and return
519 mSmmReadyToLock
= TRUE
;
524 The module Entry Point of the CPU SMM driver.
526 @param ImageHandle The firmware allocated handle for the EFI image.
527 @param SystemTable A pointer to the EFI System Table.
529 @retval EFI_SUCCESS The entry point is executed successfully.
530 @retval Other Some error occurs when executing this entry point.
536 IN EFI_HANDLE ImageHandle
,
537 IN EFI_SYSTEM_TABLE
*SystemTable
541 EFI_MP_SERVICES_PROTOCOL
*MpServices
;
542 UINTN NumberOfEnabledProcessors
;
560 // Initialize address fixup
562 PiSmmCpuSmmInitFixupAddress ();
563 PiSmmCpuSmiEntryFixupAddress ();
566 // Initialize Debug Agent to support source level debug in SMM code
568 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM
, NULL
, NULL
);
571 // Report the start of CPU SMM initialization.
575 EFI_COMPUTING_UNIT_HOST_PROCESSOR
| EFI_CU_HP_PC_SMM_INIT
579 // Find out SMRR Base and SMRR Size
581 FindSmramInfo (&mCpuHotPlugData
.SmrrBase
, &mCpuHotPlugData
.SmrrSize
);
584 // Get MP Services Protocol
586 Status
= SystemTable
->BootServices
->LocateProtocol (&gEfiMpServiceProtocolGuid
, NULL
, (VOID
**)&MpServices
);
587 ASSERT_EFI_ERROR (Status
);
590 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
592 Status
= MpServices
->GetNumberOfProcessors (MpServices
, &mNumberOfCpus
, &NumberOfEnabledProcessors
);
593 ASSERT_EFI_ERROR (Status
);
594 ASSERT (mNumberOfCpus
<= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
));
597 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
598 // A constant BSP index makes no sense because it may be hot removed.
601 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
603 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection
));
608 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
610 mSmmCodeAccessCheckEnable
= PcdGetBool (PcdCpuSmmCodeAccessCheckEnable
);
611 DEBUG ((EFI_D_INFO
, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable
));
614 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
615 // Make sure AddressEncMask is contained to smallest supported address field.
617 mAddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
618 DEBUG ((EFI_D_INFO
, "mAddressEncMask = 0x%lx\n", mAddressEncMask
));
621 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
623 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
624 mMaxNumberOfCpus
= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
);
626 mMaxNumberOfCpus
= mNumberOfCpus
;
628 gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
= mMaxNumberOfCpus
;
631 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
632 // allocated buffer. The minimum size of this buffer for a uniprocessor system
633 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
634 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
635 // then the SMI entry point and the CPU save state areas can be tiled to minimize
636 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
637 // by adding the CPU save state size, any extra CPU specific context, and
638 // the size of code that must be placed at the SMI entry point to transfer
639 // control to a C function in the native SMM execution mode. This size is
640 // rounded up to the nearest power of 2 to give the tile size for each CPU.
641 // The total amount of memory required is the maximum number of CPUs that
642 // platform supports times the tile size. The picture below shows the tiling,
643 // where m is the number of tiles that fit in 32KB.
645 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
646 // | CPU m+1 Save State |
647 // +-----------------------------+
648 // | CPU m+1 Extra Data |
649 // +-----------------------------+
651 // +-----------------------------+
652 // | CPU 2m SMI Entry |
653 // +#############################+ <-- Base of allocated buffer + 64 KB
654 // | CPU m-1 Save State |
655 // +-----------------------------+
656 // | CPU m-1 Extra Data |
657 // +-----------------------------+
659 // +-----------------------------+
660 // | CPU 2m-1 SMI Entry |
661 // +=============================+ <-- 2^n offset from Base of allocated buffer
662 // | . . . . . . . . . . . . |
663 // +=============================+ <-- 2^n offset from Base of allocated buffer
664 // | CPU 2 Save State |
665 // +-----------------------------+
666 // | CPU 2 Extra Data |
667 // +-----------------------------+
669 // +-----------------------------+
670 // | CPU m+1 SMI Entry |
671 // +=============================+ <-- Base of allocated buffer + 32 KB
672 // | CPU 1 Save State |
673 // +-----------------------------+
674 // | CPU 1 Extra Data |
675 // +-----------------------------+
677 // +-----------------------------+
678 // | CPU m SMI Entry |
679 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
680 // | CPU 0 Save State |
681 // +-----------------------------+
682 // | CPU 0 Extra Data |
683 // +-----------------------------+
685 // +-----------------------------+
686 // | CPU m-1 SMI Entry |
687 // +=============================+ <-- 2^n offset from Base of allocated buffer
688 // | . . . . . . . . . . . . |
689 // +=============================+ <-- 2^n offset from Base of allocated buffer
691 // +-----------------------------+
692 // | CPU 1 SMI Entry |
693 // +=============================+ <-- 2^n offset from Base of allocated buffer
695 // +-----------------------------+
696 // | CPU 0 SMI Entry |
697 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
701 // Retrieve CPU Family
703 AsmCpuid (CPUID_VERSION_INFO
, &RegEax
, NULL
, NULL
, NULL
);
704 FamilyId
= (RegEax
>> 8) & 0xf;
705 ModelId
= (RegEax
>> 4) & 0xf;
706 if (FamilyId
== 0x06 || FamilyId
== 0x0f) {
707 ModelId
= ModelId
| ((RegEax
>> 12) & 0xf0);
711 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
712 if (RegEax
>= CPUID_EXTENDED_CPU_SIG
) {
713 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
716 // Determine the mode of the CPU at the time an SMI occurs
717 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
718 // Volume 3C, Section 34.4.1.1
720 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
;
721 if ((RegEdx
& BIT29
) != 0) {
722 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
724 if (FamilyId
== 0x06) {
725 if (ModelId
== 0x17 || ModelId
== 0x0f || ModelId
== 0x1c) {
726 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
730 DEBUG ((DEBUG_INFO
, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask
)));
731 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) {
732 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
733 if (RegEax
> CPUID_EXTENDED_FUNCTION
) {
734 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
, NULL
, NULL
, &RegEcx
, &RegEdx
);
735 DEBUG ((DEBUG_INFO
, "CPUID[7/0] ECX - 0x%08x\n", RegEcx
));
736 DEBUG ((DEBUG_INFO
, " CET_SS - 0x%08x\n", RegEcx
& CPUID_CET_SS
));
737 DEBUG ((DEBUG_INFO
, " CET_IBT - 0x%08x\n", RegEdx
& CPUID_CET_IBT
));
738 if ((RegEcx
& CPUID_CET_SS
) == 0) {
739 mCetSupported
= FALSE
;
740 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
743 AsmCpuidEx (CPUID_EXTENDED_STATE
, CPUID_EXTENDED_STATE_SUB_LEAF
, NULL
, &RegEbx
, &RegEcx
, NULL
);
744 DEBUG ((DEBUG_INFO
, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx
, RegEcx
));
745 AsmCpuidEx (CPUID_EXTENDED_STATE
, 11, &RegEax
, NULL
, &RegEcx
, NULL
);
746 DEBUG ((DEBUG_INFO
, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
747 AsmCpuidEx(CPUID_EXTENDED_STATE
, 12, &RegEax
, NULL
, &RegEcx
, NULL
);
748 DEBUG ((DEBUG_INFO
, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
752 mCetSupported
= FALSE
;
753 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
757 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
758 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
759 // This size is rounded up to nearest power of 2.
761 TileCodeSize
= GetSmiHandlerSize ();
762 TileCodeSize
= ALIGN_VALUE(TileCodeSize
, SIZE_4KB
);
763 TileDataSize
= (SMRAM_SAVE_STATE_MAP_OFFSET
- SMM_PSD_OFFSET
) + sizeof (SMRAM_SAVE_STATE_MAP
);
764 TileDataSize
= ALIGN_VALUE(TileDataSize
, SIZE_4KB
);
765 TileSize
= TileDataSize
+ TileCodeSize
- 1;
766 TileSize
= 2 * GetPowerOfTwo32 ((UINT32
)TileSize
);
767 DEBUG ((EFI_D_INFO
, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize
, TileCodeSize
, TileDataSize
));
770 // If the TileSize is larger than space available for the SMI Handler of
771 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
772 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
773 // the SMI Handler size must be reduced or the size of the extra CPU specific
774 // context must be reduced.
776 ASSERT (TileSize
<= (SMRAM_SAVE_STATE_MAP_OFFSET
+ sizeof (SMRAM_SAVE_STATE_MAP
) - SMM_HANDLER_OFFSET
));
779 // Allocate buffer for all of the tiles.
781 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
782 // Volume 3C, Section 34.11 SMBASE Relocation
783 // For Pentium and Intel486 processors, the SMBASE values must be
784 // aligned on a 32-KByte boundary or the processor will enter shutdown
785 // state during the execution of a RSM instruction.
787 // Intel486 processors: FamilyId is 4
788 // Pentium processors : FamilyId is 5
790 BufferPages
= EFI_SIZE_TO_PAGES (SIZE_32KB
+ TileSize
* (mMaxNumberOfCpus
- 1));
791 if ((FamilyId
== 4) || (FamilyId
== 5)) {
792 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_32KB
);
794 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_4KB
);
796 ASSERT (Buffer
!= NULL
);
797 DEBUG ((EFI_D_INFO
, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer
, EFI_PAGES_TO_SIZE(BufferPages
)));
800 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
802 gSmmCpuPrivate
->ProcessorInfo
= (EFI_PROCESSOR_INFORMATION
*)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION
) * mMaxNumberOfCpus
);
803 ASSERT (gSmmCpuPrivate
->ProcessorInfo
!= NULL
);
805 gSmmCpuPrivate
->Operation
= (SMM_CPU_OPERATION
*)AllocatePool (sizeof (SMM_CPU_OPERATION
) * mMaxNumberOfCpus
);
806 ASSERT (gSmmCpuPrivate
->Operation
!= NULL
);
808 gSmmCpuPrivate
->CpuSaveStateSize
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
809 ASSERT (gSmmCpuPrivate
->CpuSaveStateSize
!= NULL
);
811 gSmmCpuPrivate
->CpuSaveState
= (VOID
**)AllocatePool (sizeof (VOID
*) * mMaxNumberOfCpus
);
812 ASSERT (gSmmCpuPrivate
->CpuSaveState
!= NULL
);
814 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveStateSize
= gSmmCpuPrivate
->CpuSaveStateSize
;
815 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveState
= gSmmCpuPrivate
->CpuSaveState
;
818 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
820 mCpuHotPlugData
.ApicId
= (UINT64
*)AllocatePool (sizeof (UINT64
) * mMaxNumberOfCpus
);
821 ASSERT (mCpuHotPlugData
.ApicId
!= NULL
);
822 mCpuHotPlugData
.SmBase
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
823 ASSERT (mCpuHotPlugData
.SmBase
!= NULL
);
824 mCpuHotPlugData
.ArrayLength
= (UINT32
)mMaxNumberOfCpus
;
827 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
828 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
829 // size for each CPU in the platform
831 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
832 mCpuHotPlugData
.SmBase
[Index
] = (UINTN
)Buffer
+ Index
* TileSize
- SMM_HANDLER_OFFSET
;
833 gSmmCpuPrivate
->CpuSaveStateSize
[Index
] = sizeof(SMRAM_SAVE_STATE_MAP
);
834 gSmmCpuPrivate
->CpuSaveState
[Index
] = (VOID
*)(mCpuHotPlugData
.SmBase
[Index
] + SMRAM_SAVE_STATE_MAP_OFFSET
);
835 gSmmCpuPrivate
->Operation
[Index
] = SmmCpuNone
;
837 if (Index
< mNumberOfCpus
) {
838 Status
= MpServices
->GetProcessorInfo (MpServices
, Index
, &gSmmCpuPrivate
->ProcessorInfo
[Index
]);
839 ASSERT_EFI_ERROR (Status
);
840 mCpuHotPlugData
.ApicId
[Index
] = gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
;
842 DEBUG ((EFI_D_INFO
, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
844 (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
,
845 mCpuHotPlugData
.SmBase
[Index
],
846 gSmmCpuPrivate
->CpuSaveState
[Index
],
847 gSmmCpuPrivate
->CpuSaveStateSize
[Index
]
850 gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
= INVALID_APIC_ID
;
851 mCpuHotPlugData
.ApicId
[Index
] = INVALID_APIC_ID
;
856 // Allocate SMI stacks for all processors.
858 mSmmStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize
)));
859 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
861 // 2 more pages is allocated for each processor.
862 // one is guard page and the other is known good stack.
864 // +-------------------------------------------+-----+-------------------------------------------+
865 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
866 // +-------------------------------------------+-----+-------------------------------------------+
868 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
870 mSmmStackSize
+= EFI_PAGES_TO_SIZE (2);
873 mSmmShadowStackSize
= 0;
874 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
876 // Append Shadow Stack after normal stack
879 // +--------------------------------------------------+---------------------------------------------------------------+
880 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
881 // +--------------------------------------------------+---------------------------------------------------------------+
882 // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|
883 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
885 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
887 mSmmShadowStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize
)));
888 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
889 mSmmShadowStackSize
+= EFI_PAGES_TO_SIZE (2);
893 Stacks
= (UINT8
*) AllocatePages (gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (EFI_SIZE_TO_PAGES (mSmmStackSize
+ mSmmShadowStackSize
)));
894 ASSERT (Stacks
!= NULL
);
895 mSmmStackArrayBase
= (UINTN
)Stacks
;
896 mSmmStackArrayEnd
= mSmmStackArrayBase
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (mSmmStackSize
+ mSmmShadowStackSize
) - 1;
898 DEBUG ((DEBUG_INFO
, "Stacks - 0x%x\n", Stacks
));
899 DEBUG ((DEBUG_INFO
, "mSmmStackSize - 0x%x\n", mSmmStackSize
));
900 DEBUG ((DEBUG_INFO
, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard
)));
901 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
902 DEBUG ((DEBUG_INFO
, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize
));
906 // Set SMI stack for SMM base relocation
908 PatchInstructionX86 (
910 (UINTN
) (Stacks
+ mSmmStackSize
- sizeof (UINTN
)),
920 // Relocate SMM Base addresses to the ones allocated from SMRAM
922 mRebased
= (BOOLEAN
*)AllocateZeroPool (sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
923 ASSERT (mRebased
!= NULL
);
927 // Call hook for BSP to perform extra actions in normal mode after all
928 // SMM base addresses have been relocated on all CPUs
930 SmmCpuFeaturesSmmRelocationComplete ();
932 DEBUG ((DEBUG_INFO
, "mXdSupported - 0x%x\n", mXdSupported
));
935 // SMM Time initialization
937 InitializeSmmTimer ();
940 // Initialize MP globals
942 Cr3
= InitializeMpServiceData (Stacks
, mSmmStackSize
, mSmmShadowStackSize
);
944 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
945 for (Index
= 0; Index
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; Index
++) {
948 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
951 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
954 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ EFI_PAGES_TO_SIZE(1) + (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
962 // Fill in SMM Reserved Regions
964 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedStart
= 0;
965 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedSize
= 0;
968 // Install the SMM Configuration Protocol onto a new handle on the handle database.
969 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
970 // to an SMRAM address will be present in the handle database
972 Status
= SystemTable
->BootServices
->InstallMultipleProtocolInterfaces (
973 &gSmmCpuPrivate
->SmmCpuHandle
,
974 &gEfiSmmConfigurationProtocolGuid
, &gSmmCpuPrivate
->SmmConfiguration
,
977 ASSERT_EFI_ERROR (Status
);
980 // Install the SMM CPU Protocol into SMM protocol database
982 Status
= gSmst
->SmmInstallProtocolInterface (
984 &gEfiSmmCpuProtocolGuid
,
985 EFI_NATIVE_INTERFACE
,
988 ASSERT_EFI_ERROR (Status
);
991 // Install the SMM Memory Attribute Protocol into SMM protocol database
993 Status
= gSmst
->SmmInstallProtocolInterface (
995 &gEdkiiSmmMemoryAttributeProtocolGuid
,
996 EFI_NATIVE_INTERFACE
,
999 ASSERT_EFI_ERROR (Status
);
1002 // Initialize global buffer for MM MP.
1004 InitializeDataForMmMp ();
1007 // Install the SMM Mp Protocol into SMM protocol database
1009 Status
= gSmst
->SmmInstallProtocolInterface (
1011 &gEfiMmMpProtocolGuid
,
1012 EFI_NATIVE_INTERFACE
,
1015 ASSERT_EFI_ERROR (Status
);
1018 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1020 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1021 Status
= PcdSet64S (PcdCpuHotPlugDataAddress
, (UINT64
)(UINTN
)&mCpuHotPlugData
);
1022 ASSERT_EFI_ERROR (Status
);
1026 // Initialize SMM CPU Services Support
1028 Status
= InitializeSmmCpuServices (mSmmCpuHandle
);
1029 ASSERT_EFI_ERROR (Status
);
1032 // register SMM Ready To Lock Protocol notification
1034 Status
= gSmst
->SmmRegisterProtocolNotify (
1035 &gEfiSmmReadyToLockProtocolGuid
,
1036 SmmReadyToLockEventNotify
,
1039 ASSERT_EFI_ERROR (Status
);
1042 // Initialize SMM Profile feature
1044 InitSmmProfile (Cr3
);
1046 GetAcpiS3EnableFlag ();
1047 InitSmmS3ResumeState (Cr3
);
1049 DEBUG ((EFI_D_INFO
, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1056 Find out SMRAM information including SMRR base and SMRR size.
1058 @param SmrrBase SMRR base
1059 @param SmrrSize SMRR size
1064 OUT UINT32
*SmrrBase
,
1065 OUT UINT32
*SmrrSize
1070 EFI_SMM_ACCESS2_PROTOCOL
*SmmAccess
;
1071 EFI_SMRAM_DESCRIPTOR
*CurrentSmramRange
;
1077 // Get SMM Access Protocol
1079 Status
= gBS
->LocateProtocol (&gEfiSmmAccess2ProtocolGuid
, NULL
, (VOID
**)&SmmAccess
);
1080 ASSERT_EFI_ERROR (Status
);
1083 // Get SMRAM information
1086 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, NULL
);
1087 ASSERT (Status
== EFI_BUFFER_TOO_SMALL
);
1089 mSmmCpuSmramRanges
= (EFI_SMRAM_DESCRIPTOR
*)AllocatePool (Size
);
1090 ASSERT (mSmmCpuSmramRanges
!= NULL
);
1092 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, mSmmCpuSmramRanges
);
1093 ASSERT_EFI_ERROR (Status
);
1095 mSmmCpuSmramRangeCount
= Size
/ sizeof (EFI_SMRAM_DESCRIPTOR
);
1098 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1100 CurrentSmramRange
= NULL
;
1101 for (Index
= 0, MaxSize
= SIZE_256KB
- EFI_PAGE_SIZE
; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1103 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1105 if ((mSmmCpuSmramRanges
[Index
].RegionState
& (EFI_ALLOCATED
| EFI_NEEDS_TESTING
| EFI_NEEDS_ECC_INITIALIZATION
)) != 0) {
1109 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= BASE_1MB
) {
1110 if ((mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) <= SMRR_MAX_ADDRESS
) {
1111 if (mSmmCpuSmramRanges
[Index
].PhysicalSize
>= MaxSize
) {
1112 MaxSize
= mSmmCpuSmramRanges
[Index
].PhysicalSize
;
1113 CurrentSmramRange
= &mSmmCpuSmramRanges
[Index
];
1119 ASSERT (CurrentSmramRange
!= NULL
);
1121 *SmrrBase
= (UINT32
)CurrentSmramRange
->CpuStart
;
1122 *SmrrSize
= (UINT32
)CurrentSmramRange
->PhysicalSize
;
1125 // Extend *SmrrBase/*SmrrSize to include adjacent SMRAM ranges
1129 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1130 if (mSmmCpuSmramRanges
[Index
].CpuStart
< *SmrrBase
&&
1131 *SmrrBase
== (mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
)) {
1132 *SmrrBase
= (UINT32
)mSmmCpuSmramRanges
[Index
].CpuStart
;
1133 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1135 } else if ((*SmrrBase
+ *SmrrSize
) == mSmmCpuSmramRanges
[Index
].CpuStart
&& mSmmCpuSmramRanges
[Index
].PhysicalSize
> 0) {
1136 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1142 DEBUG ((EFI_D_INFO
, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase
, *SmrrSize
));
1146 Configure SMM Code Access Check feature on an AP.
1147 SMM Feature Control MSR will be locked after configuration.
1149 @param[in,out] Buffer Pointer to private data buffer.
1153 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1158 UINT64 SmmFeatureControlMsr
;
1159 UINT64 NewSmmFeatureControlMsr
;
1162 // Retrieve the CPU Index from the context passed in
1164 CpuIndex
= *(UINTN
*)Buffer
;
1167 // Get the current SMM Feature Control MSR value
1169 SmmFeatureControlMsr
= SmmCpuFeaturesGetSmmRegister (CpuIndex
, SmmRegFeatureControl
);
1172 // Compute the new SMM Feature Control MSR value
1174 NewSmmFeatureControlMsr
= SmmFeatureControlMsr
;
1175 if (mSmmCodeAccessCheckEnable
) {
1176 NewSmmFeatureControlMsr
|= SMM_CODE_CHK_EN_BIT
;
1177 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock
)) {
1178 NewSmmFeatureControlMsr
|= SMM_FEATURE_CONTROL_LOCK_BIT
;
1183 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1185 if (NewSmmFeatureControlMsr
!= SmmFeatureControlMsr
) {
1186 SmmCpuFeaturesSetSmmRegister (CpuIndex
, SmmRegFeatureControl
, NewSmmFeatureControlMsr
);
1190 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1192 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1196 Configure SMM Code Access Check feature for all processors.
1197 SMM Feature Control MSR will be locked after configuration.
1200 ConfigSmmCodeAccessCheck (
1208 // Check to see if the Feature Control MSR is supported on this CPU
1210 Index
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
1211 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index
, SmmRegFeatureControl
)) {
1212 mSmmCodeAccessCheckEnable
= FALSE
;
1217 // Check to see if the CPU supports the SMM Code Access Check feature
1218 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1220 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP
) & SMM_CODE_ACCESS_CHK_BIT
) == 0) {
1221 mSmmCodeAccessCheckEnable
= FALSE
;
1226 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1228 InitializeSpinLock (mConfigSmmCodeAccessCheckLock
);
1231 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1232 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1234 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1237 // Enable SMM Code Access Check feature on the BSP.
1239 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index
);
1242 // Enable SMM Code Access Check feature for the APs.
1244 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1245 if (Index
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1246 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== INVALID_APIC_ID
) {
1248 // If this processor does not exist
1253 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1254 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1256 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1259 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1261 Status
= gSmst
->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor
, Index
, &Index
);
1262 ASSERT_EFI_ERROR (Status
);
1265 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1267 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock
)) {
1272 // Release the Config SMM Code Access Check spin lock.
1274 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1280 This API provides a way to allocate memory for page table.
1282 This API can be called more once to allocate memory for page tables.
1284 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1285 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1286 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1289 @param Pages The number of 4 KB pages to allocate.
1291 @return A pointer to the allocated buffer or NULL if allocation fails.
1295 AllocatePageTableMemory (
1301 Buffer
= SmmCpuFeaturesAllocatePageTableMemory (Pages
);
1302 if (Buffer
!= NULL
) {
1305 return AllocatePages (Pages
);
1309 Allocate pages for code.
1311 @param[in] Pages Number of pages to be allocated.
1313 @return Allocated memory.
1321 EFI_PHYSICAL_ADDRESS Memory
;
1327 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1328 if (EFI_ERROR (Status
)) {
1331 return (VOID
*) (UINTN
) Memory
;
1335 Allocate aligned pages for code.
1337 @param[in] Pages Number of pages to be allocated.
1338 @param[in] Alignment The requested alignment of the allocation.
1339 Must be a power of two.
1340 If Alignment is zero, then byte alignment is used.
1342 @return Allocated memory.
1345 AllocateAlignedCodePages (
1351 EFI_PHYSICAL_ADDRESS Memory
;
1352 UINTN AlignedMemory
;
1353 UINTN AlignmentMask
;
1354 UINTN UnalignedPages
;
1358 // Alignment must be a power of two or zero.
1360 ASSERT ((Alignment
& (Alignment
- 1)) == 0);
1365 if (Alignment
> EFI_PAGE_SIZE
) {
1367 // Calculate the total number of pages since alignment is larger than page size.
1369 AlignmentMask
= Alignment
- 1;
1370 RealPages
= Pages
+ EFI_SIZE_TO_PAGES (Alignment
);
1372 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1374 ASSERT (RealPages
> Pages
);
1376 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, RealPages
, &Memory
);
1377 if (EFI_ERROR (Status
)) {
1380 AlignedMemory
= ((UINTN
) Memory
+ AlignmentMask
) & ~AlignmentMask
;
1381 UnalignedPages
= EFI_SIZE_TO_PAGES (AlignedMemory
- (UINTN
) Memory
);
1382 if (UnalignedPages
> 0) {
1384 // Free first unaligned page(s).
1386 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1387 ASSERT_EFI_ERROR (Status
);
1389 Memory
= AlignedMemory
+ EFI_PAGES_TO_SIZE (Pages
);
1390 UnalignedPages
= RealPages
- Pages
- UnalignedPages
;
1391 if (UnalignedPages
> 0) {
1393 // Free last unaligned page(s).
1395 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1396 ASSERT_EFI_ERROR (Status
);
1400 // Do not over-allocate pages in this case.
1402 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1403 if (EFI_ERROR (Status
)) {
1406 AlignedMemory
= (UINTN
) Memory
;
1408 return (VOID
*) AlignedMemory
;
1412 Perform the remaining tasks.
1416 PerformRemainingTasks (
1420 if (mSmmReadyToLock
) {
1422 // Start SMM Profile feature
1424 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1428 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1433 // Mark critical region to be read-only in page table
1435 SetMemMapAttributes ();
1438 // Do not protect memory outside SMRAM when SMM static page table is not enabled.
1440 if (mCpuSmmStaticPageTable
) {
1443 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1445 SetUefiMemMapAttributes ();
1448 // Set page table itself to be read-only
1450 SetPageTableAttributes ();
1454 // Configure SMM Code Access Check feature if available.
1456 ConfigSmmCodeAccessCheck ();
1458 SmmCpuFeaturesCompleteSmmReadyToLock ();
1461 // Clean SMM ready to lock flag
1463 mSmmReadyToLock
= FALSE
;
1468 Perform the pre tasks.
1476 RestoreSmmConfigurationInS3 ();