/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/
11 #include "PiSmmCpuDxeSmm.h"
14 // SMM CPU Private Data structure that contains SMM Configuration Protocol
15 // along its supporting fields.
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData
= {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE
, // Signature
20 NULL
, // Pointer to ProcessorInfo array
21 NULL
, // Pointer to Operation array
22 NULL
, // Pointer to CpuSaveStateSize array
23 NULL
, // Pointer to CpuSaveState array
24 { {0} }, // SmmReservedSmramRegion
26 SmmStartupThisAp
, // SmmCoreEntryContext.SmmStartupThisAp
27 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
28 0, // SmmCoreEntryContext.NumberOfCpus
29 NULL
, // SmmCoreEntryContext.CpuSaveStateSize
30 NULL
// SmmCoreEntryContext.CpuSaveState
34 mSmmCpuPrivateData
.SmmReservedSmramRegion
, // SmmConfiguration.SmramReservedRegions
35 RegisterSmmEntry
// SmmConfiguration.RegisterSmmEntry
39 CPU_HOT_PLUG_DATA mCpuHotPlugData
= {
40 CPU_HOT_PLUG_DATA_REVISION_1
, // Revision
41 0, // Array Length of SmBase and APIC ID
42 NULL
, // Pointer to APIC ID array
43 NULL
, // Pointer to SMBASE array
50 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
52 SMM_CPU_PRIVATE_DATA
*gSmmCpuPrivate
= &mSmmCpuPrivateData
;
55 // SMM Relocation variables
57 volatile BOOLEAN
*mRebased
;
58 volatile BOOLEAN mIsBsp
;
61 /// Handle for the SMM CPU Protocol
63 EFI_HANDLE mSmmCpuHandle
= NULL
;
66 /// SMM CPU Protocol instance
68 EFI_SMM_CPU_PROTOCOL mSmmCpu
= {
74 /// SMM Memory Attribute Protocol instance
76 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute
= {
77 EdkiiSmmGetMemoryAttributes
,
78 EdkiiSmmSetMemoryAttributes
,
79 EdkiiSmmClearMemoryAttributes
82 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable
[EXCEPTION_VECTOR_NUMBER
];
85 // SMM stack information
87 UINTN mSmmStackArrayBase
;
88 UINTN mSmmStackArrayEnd
;
91 UINTN mSmmShadowStackSize
;
92 BOOLEAN mCetSupported
= TRUE
;
94 UINTN mMaxNumberOfCpus
= 1;
95 UINTN mNumberOfCpus
= 1;
98 // SMM ready to lock flag
100 BOOLEAN mSmmReadyToLock
= FALSE
;
103 // Global used to cache PCD for SMM Code Access Check enable
105 BOOLEAN mSmmCodeAccessCheckEnable
= FALSE
;
108 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
110 UINT64 mAddressEncMask
= 0;
113 // Spin lock used to serialize setting of SMM Code Access Check feature
115 SPIN_LOCK
*mConfigSmmCodeAccessCheckLock
= NULL
;
118 // Saved SMM ranges information
120 EFI_SMRAM_DESCRIPTOR
*mSmmCpuSmramRanges
;
121 UINTN mSmmCpuSmramRangeCount
;
123 UINT8 mPhysicalAddressBits
;
126 // Control register contents saved for SMM S3 resume state initialization.
132 Initialize IDT to setup exception handlers for SMM.
141 BOOLEAN InterruptState
;
142 IA32_DESCRIPTOR DxeIdtr
;
145 // There are 32 (not 255) entries in it since only processor
146 // generated exceptions will be handled.
148 gcSmiIdtr
.Limit
= (sizeof(IA32_IDT_GATE_DESCRIPTOR
) * 32) - 1;
150 // Allocate page aligned IDT, because it might be set as read only.
152 gcSmiIdtr
.Base
= (UINTN
)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr
.Limit
+ 1));
153 ASSERT (gcSmiIdtr
.Base
!= 0);
154 ZeroMem ((VOID
*)gcSmiIdtr
.Base
, gcSmiIdtr
.Limit
+ 1);
157 // Disable Interrupt and save DXE IDT table
159 InterruptState
= SaveAndDisableInterrupts ();
160 AsmReadIdtr (&DxeIdtr
);
162 // Load SMM temporary IDT table
164 AsmWriteIdtr (&gcSmiIdtr
);
166 // Setup SMM default exception handlers, SMM IDT table
167 // will be updated and saved in gcSmiIdtr
169 Status
= InitializeCpuExceptionHandlers (NULL
);
170 ASSERT_EFI_ERROR (Status
);
172 // Restore DXE IDT table and CPU interrupt
174 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &DxeIdtr
);
175 SetInterruptState (InterruptState
);
179 Search module name by input IP address and output it.
181 @param CallerIpAddress Caller instruction pointer.
186 IN UINTN CallerIpAddress
195 Pe32Data
= PeCoffSearchImageBase (CallerIpAddress
);
197 DEBUG ((DEBUG_ERROR
, "It is invoked from the instruction before IP(0x%p)", (VOID
*) CallerIpAddress
));
198 PdbPointer
= PeCoffLoaderGetPdbPointer ((VOID
*) Pe32Data
);
199 if (PdbPointer
!= NULL
) {
200 DEBUG ((DEBUG_ERROR
, " in module (%a)\n", PdbPointer
));
206 Read information from the CPU save state.
208 @param This EFI_SMM_CPU_PROTOCOL instance
209 @param Width The number of bytes to read from the CPU save state.
210 @param Register Specifies the CPU register to read form the save state.
211 @param CpuIndex Specifies the zero-based index of the CPU save state.
212 @param Buffer Upon return, this holds the CPU register value read from the save state.
214 @retval EFI_SUCCESS The register was read from Save State
215 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
216 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.
222 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
224 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
232 // Retrieve pointer to the specified CPU's SMM Save State buffer
234 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
235 return EFI_INVALID_PARAMETER
;
238 // The SpeculationBarrier() call here is to ensure the above check for the
239 // CpuIndex has been completed before the execution of subsequent codes.
241 SpeculationBarrier ();
244 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
246 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
248 // The pseudo-register only supports the 64-bit size specified by Width.
250 if (Width
!= sizeof (UINT64
)) {
251 return EFI_INVALID_PARAMETER
;
254 // If the processor is in SMM at the time the SMI occurred,
255 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
256 // Otherwise, EFI_NOT_FOUND is returned.
258 if (*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
)) {
259 *(UINT64
*)Buffer
= gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
;
262 return EFI_NOT_FOUND
;
266 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
267 return EFI_INVALID_PARAMETER
;
270 Status
= SmmCpuFeaturesReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
271 if (Status
== EFI_UNSUPPORTED
) {
272 Status
= ReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
278 Write data to the CPU save state.
280 @param This EFI_SMM_CPU_PROTOCOL instance
281 @param Width The number of bytes to read from the CPU save state.
282 @param Register Specifies the CPU register to write to the save state.
283 @param CpuIndex Specifies the zero-based index of the CPU save state
284 @param Buffer Upon entry, this holds the new CPU register value.
286 @retval EFI_SUCCESS The register was written from Save State
287 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
288 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct
294 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
296 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
298 IN CONST VOID
*Buffer
304 // Retrieve pointer to the specified CPU's SMM Save State buffer
306 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
307 return EFI_INVALID_PARAMETER
;
311 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
313 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
317 if (!mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) {
318 return EFI_INVALID_PARAMETER
;
321 Status
= SmmCpuFeaturesWriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
322 if (Status
== EFI_UNSUPPORTED
) {
323 Status
= WriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
330 C function for SMI handler. To change all processor's SMMBase Register.
343 // Update SMM IDT entries' code segment and load IDT
345 AsmWriteIdtr (&gcSmiIdtr
);
346 ApicId
= GetApicId ();
348 ASSERT (mNumberOfCpus
<= mMaxNumberOfCpus
);
350 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
351 if (ApicId
== (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
353 // Initialize SMM specific features on the currently executing CPU
355 SmmCpuFeaturesInitializeProcessor (
358 gSmmCpuPrivate
->ProcessorInfo
,
364 // Check XD and BTS features on each processor on normal boot
366 CheckFeatureSupported ();
371 // BSP rebase is already done above.
372 // Initialize private data during S3 resume
374 InitializeMpSyncData ();
378 // Hook return after RSM to set SMM re-based flag
380 SemaphoreHook (Index
, &mRebased
[Index
]);
389 Relocate SmmBases for each processor.
391 Execute on first boot and all S3 resumes
400 UINT8 BakBuf
[BACK_BUF_SIZE
];
401 SMRAM_SAVE_STATE_MAP BakBuf2
;
402 SMRAM_SAVE_STATE_MAP
*CpuStatePtr
;
409 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
411 ASSERT (sizeof (BakBuf
) >= gcSmmInitSize
);
414 // Patch ASM code template with current CR0, CR3, and CR4 values
416 mSmmCr0
= (UINT32
)AsmReadCr0 ();
417 PatchInstructionX86 (gPatchSmmCr0
, mSmmCr0
, 4);
418 PatchInstructionX86 (gPatchSmmCr3
, AsmReadCr3 (), 4);
419 mSmmCr4
= (UINT32
)AsmReadCr4 ();
420 PatchInstructionX86 (gPatchSmmCr4
, mSmmCr4
& (~CR4_CET_ENABLE
), 4);
423 // Patch GDTR for SMM base relocation
425 gcSmiInitGdtr
.Base
= gcSmiGdtr
.Base
;
426 gcSmiInitGdtr
.Limit
= gcSmiGdtr
.Limit
;
428 U8Ptr
= (UINT8
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMM_HANDLER_OFFSET
);
429 CpuStatePtr
= (SMRAM_SAVE_STATE_MAP
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMRAM_SAVE_STATE_MAP_OFFSET
);
432 // Backup original contents at address 0x38000
434 CopyMem (BakBuf
, U8Ptr
, sizeof (BakBuf
));
435 CopyMem (&BakBuf2
, CpuStatePtr
, sizeof (BakBuf2
));
438 // Load image for relocation
440 CopyMem (U8Ptr
, gcSmmInitTemplate
, gcSmmInitSize
);
443 // Retrieve the local APIC ID of current processor
445 ApicId
= GetApicId ();
448 // Relocate SM bases for all APs
449 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
452 BspIndex
= (UINTN
)-1;
453 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
454 mRebased
[Index
] = FALSE
;
455 if (ApicId
!= (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
456 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
458 // Wait for this AP to finish its 1st SMI
460 while (!mRebased
[Index
]);
463 // BSP will be Relocated later
470 // Relocate BSP's SMM base
472 ASSERT (BspIndex
!= (UINTN
)-1);
476 // Wait for the BSP to finish its 1st SMI
478 while (!mRebased
[BspIndex
]);
481 // Restore contents at address 0x38000
483 CopyMem (CpuStatePtr
, &BakBuf2
, sizeof (BakBuf2
));
484 CopyMem (U8Ptr
, BakBuf
, sizeof (BakBuf
));
488 SMM Ready To Lock event notification handler.
490 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
491 perform additional lock actions that must be performed from SMM on the next SMI.
493 @param[in] Protocol Points to the protocol's unique identifier.
494 @param[in] Interface Points to the interface instance.
495 @param[in] Handle The handle on which the interface was installed.
497 @retval EFI_SUCCESS Notification handler runs successfully.
501 SmmReadyToLockEventNotify (
502 IN CONST EFI_GUID
*Protocol
,
510 // Cache a copy of UEFI memory map before we start profiling feature.
515 // Set SMM ready to lock flag and return
517 mSmmReadyToLock
= TRUE
;
522 The module Entry Point of the CPU SMM driver.
524 @param ImageHandle The firmware allocated handle for the EFI image.
525 @param SystemTable A pointer to the EFI System Table.
527 @retval EFI_SUCCESS The entry point is executed successfully.
528 @retval Other Some error occurs when executing this entry point.
534 IN EFI_HANDLE ImageHandle
,
535 IN EFI_SYSTEM_TABLE
*SystemTable
539 EFI_MP_SERVICES_PROTOCOL
*MpServices
;
540 UINTN NumberOfEnabledProcessors
;
558 // Initialize address fixup
560 PiSmmCpuSmmInitFixupAddress ();
561 PiSmmCpuSmiEntryFixupAddress ();
564 // Initialize Debug Agent to support source level debug in SMM code
566 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM
, NULL
, NULL
);
569 // Report the start of CPU SMM initialization.
573 EFI_COMPUTING_UNIT_HOST_PROCESSOR
| EFI_CU_HP_PC_SMM_INIT
577 // Find out SMRR Base and SMRR Size
579 FindSmramInfo (&mCpuHotPlugData
.SmrrBase
, &mCpuHotPlugData
.SmrrSize
);
582 // Get MP Services Protocol
584 Status
= SystemTable
->BootServices
->LocateProtocol (&gEfiMpServiceProtocolGuid
, NULL
, (VOID
**)&MpServices
);
585 ASSERT_EFI_ERROR (Status
);
588 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
590 Status
= MpServices
->GetNumberOfProcessors (MpServices
, &mNumberOfCpus
, &NumberOfEnabledProcessors
);
591 ASSERT_EFI_ERROR (Status
);
592 ASSERT (mNumberOfCpus
<= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
));
595 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
596 // A constant BSP index makes no sense because it may be hot removed.
599 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
601 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection
));
606 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
608 mSmmCodeAccessCheckEnable
= PcdGetBool (PcdCpuSmmCodeAccessCheckEnable
);
609 DEBUG ((EFI_D_INFO
, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable
));
612 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
613 // Make sure AddressEncMask is contained to smallest supported address field.
615 mAddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
616 DEBUG ((EFI_D_INFO
, "mAddressEncMask = 0x%lx\n", mAddressEncMask
));
619 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
621 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
622 mMaxNumberOfCpus
= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
);
624 mMaxNumberOfCpus
= mNumberOfCpus
;
626 gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
= mMaxNumberOfCpus
;
629 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
630 // allocated buffer. The minimum size of this buffer for a uniprocessor system
631 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
632 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
633 // then the SMI entry point and the CPU save state areas can be tiles to minimize
634 // the total amount SMRAM required for all the CPUs. The tile size can be computed
635 // by adding the // CPU save state size, any extra CPU specific context, and
636 // the size of code that must be placed at the SMI entry point to transfer
637 // control to a C function in the native SMM execution mode. This size is
638 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
639 // The total amount of memory required is the maximum number of CPUs that
640 // platform supports times the tile size. The picture below shows the tiling,
641 // where m is the number of tiles that fit in 32KB.
643 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
644 // | CPU m+1 Save State |
645 // +-----------------------------+
646 // | CPU m+1 Extra Data |
647 // +-----------------------------+
649 // +-----------------------------+
650 // | CPU 2m SMI Entry |
651 // +#############################+ <-- Base of allocated buffer + 64 KB
652 // | CPU m-1 Save State |
653 // +-----------------------------+
654 // | CPU m-1 Extra Data |
655 // +-----------------------------+
657 // +-----------------------------+
658 // | CPU 2m-1 SMI Entry |
659 // +=============================+ <-- 2^n offset from Base of allocated buffer
660 // | . . . . . . . . . . . . |
661 // +=============================+ <-- 2^n offset from Base of allocated buffer
662 // | CPU 2 Save State |
663 // +-----------------------------+
664 // | CPU 2 Extra Data |
665 // +-----------------------------+
667 // +-----------------------------+
668 // | CPU m+1 SMI Entry |
669 // +=============================+ <-- Base of allocated buffer + 32 KB
670 // | CPU 1 Save State |
671 // +-----------------------------+
672 // | CPU 1 Extra Data |
673 // +-----------------------------+
675 // +-----------------------------+
676 // | CPU m SMI Entry |
677 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
678 // | CPU 0 Save State |
679 // +-----------------------------+
680 // | CPU 0 Extra Data |
681 // +-----------------------------+
683 // +-----------------------------+
684 // | CPU m-1 SMI Entry |
685 // +=============================+ <-- 2^n offset from Base of allocated buffer
686 // | . . . . . . . . . . . . |
687 // +=============================+ <-- 2^n offset from Base of allocated buffer
689 // +-----------------------------+
690 // | CPU 1 SMI Entry |
691 // +=============================+ <-- 2^n offset from Base of allocated buffer
693 // +-----------------------------+
694 // | CPU 0 SMI Entry |
695 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
699 // Retrieve CPU Family
701 AsmCpuid (CPUID_VERSION_INFO
, &RegEax
, NULL
, NULL
, NULL
);
702 FamilyId
= (RegEax
>> 8) & 0xf;
703 ModelId
= (RegEax
>> 4) & 0xf;
704 if (FamilyId
== 0x06 || FamilyId
== 0x0f) {
705 ModelId
= ModelId
| ((RegEax
>> 12) & 0xf0);
709 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
710 if (RegEax
>= CPUID_EXTENDED_CPU_SIG
) {
711 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
714 // Determine the mode of the CPU at the time an SMI occurs
715 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
716 // Volume 3C, Section 34.4.1.1
718 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
;
719 if ((RegEdx
& BIT29
) != 0) {
720 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
722 if (FamilyId
== 0x06) {
723 if (ModelId
== 0x17 || ModelId
== 0x0f || ModelId
== 0x1c) {
724 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
728 DEBUG ((DEBUG_INFO
, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask
)));
729 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) {
730 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
731 if (RegEax
> CPUID_EXTENDED_FUNCTION
) {
732 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
, NULL
, NULL
, &RegEcx
, &RegEdx
);
733 DEBUG ((DEBUG_INFO
, "CPUID[7/0] ECX - 0x%08x\n", RegEcx
));
734 DEBUG ((DEBUG_INFO
, " CET_SS - 0x%08x\n", RegEcx
& CPUID_CET_SS
));
735 DEBUG ((DEBUG_INFO
, " CET_IBT - 0x%08x\n", RegEdx
& CPUID_CET_IBT
));
736 if ((RegEcx
& CPUID_CET_SS
) == 0) {
737 mCetSupported
= FALSE
;
738 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
741 AsmCpuidEx (CPUID_EXTENDED_STATE
, CPUID_EXTENDED_STATE_SUB_LEAF
, NULL
, &RegEbx
, &RegEcx
, NULL
);
742 DEBUG ((DEBUG_INFO
, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx
, RegEcx
));
743 AsmCpuidEx (CPUID_EXTENDED_STATE
, 11, &RegEax
, NULL
, &RegEcx
, NULL
);
744 DEBUG ((DEBUG_INFO
, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
745 AsmCpuidEx(CPUID_EXTENDED_STATE
, 12, &RegEax
, NULL
, &RegEcx
, NULL
);
746 DEBUG ((DEBUG_INFO
, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax
, RegEcx
));
750 mCetSupported
= FALSE
;
751 PatchInstructionX86 (mPatchCetSupported
, mCetSupported
, 1);
755 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
756 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
757 // This size is rounded up to nearest power of 2.
759 TileCodeSize
= GetSmiHandlerSize ();
760 TileCodeSize
= ALIGN_VALUE(TileCodeSize
, SIZE_4KB
);
761 TileDataSize
= (SMRAM_SAVE_STATE_MAP_OFFSET
- SMM_PSD_OFFSET
) + sizeof (SMRAM_SAVE_STATE_MAP
);
762 TileDataSize
= ALIGN_VALUE(TileDataSize
, SIZE_4KB
);
763 TileSize
= TileDataSize
+ TileCodeSize
- 1;
764 TileSize
= 2 * GetPowerOfTwo32 ((UINT32
)TileSize
);
765 DEBUG ((EFI_D_INFO
, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize
, TileCodeSize
, TileDataSize
));
768 // If the TileSize is larger than space available for the SMI Handler of
769 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
770 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
771 // the SMI Handler size must be reduced or the size of the extra CPU specific
772 // context must be reduced.
774 ASSERT (TileSize
<= (SMRAM_SAVE_STATE_MAP_OFFSET
+ sizeof (SMRAM_SAVE_STATE_MAP
) - SMM_HANDLER_OFFSET
));
777 // Allocate buffer for all of the tiles.
779 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
780 // Volume 3C, Section 34.11 SMBASE Relocation
781 // For Pentium and Intel486 processors, the SMBASE values must be
782 // aligned on a 32-KByte boundary or the processor will enter shutdown
783 // state during the execution of a RSM instruction.
785 // Intel486 processors: FamilyId is 4
786 // Pentium processors : FamilyId is 5
788 BufferPages
= EFI_SIZE_TO_PAGES (SIZE_32KB
+ TileSize
* (mMaxNumberOfCpus
- 1));
789 if ((FamilyId
== 4) || (FamilyId
== 5)) {
790 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_32KB
);
792 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_4KB
);
794 ASSERT (Buffer
!= NULL
);
795 DEBUG ((EFI_D_INFO
, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer
, EFI_PAGES_TO_SIZE(BufferPages
)));
798 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
800 gSmmCpuPrivate
->ProcessorInfo
= (EFI_PROCESSOR_INFORMATION
*)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION
) * mMaxNumberOfCpus
);
801 ASSERT (gSmmCpuPrivate
->ProcessorInfo
!= NULL
);
803 gSmmCpuPrivate
->Operation
= (SMM_CPU_OPERATION
*)AllocatePool (sizeof (SMM_CPU_OPERATION
) * mMaxNumberOfCpus
);
804 ASSERT (gSmmCpuPrivate
->Operation
!= NULL
);
806 gSmmCpuPrivate
->CpuSaveStateSize
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
807 ASSERT (gSmmCpuPrivate
->CpuSaveStateSize
!= NULL
);
809 gSmmCpuPrivate
->CpuSaveState
= (VOID
**)AllocatePool (sizeof (VOID
*) * mMaxNumberOfCpus
);
810 ASSERT (gSmmCpuPrivate
->CpuSaveState
!= NULL
);
812 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveStateSize
= gSmmCpuPrivate
->CpuSaveStateSize
;
813 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveState
= gSmmCpuPrivate
->CpuSaveState
;
816 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
818 mCpuHotPlugData
.ApicId
= (UINT64
*)AllocatePool (sizeof (UINT64
) * mMaxNumberOfCpus
);
819 ASSERT (mCpuHotPlugData
.ApicId
!= NULL
);
820 mCpuHotPlugData
.SmBase
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
821 ASSERT (mCpuHotPlugData
.SmBase
!= NULL
);
822 mCpuHotPlugData
.ArrayLength
= (UINT32
)mMaxNumberOfCpus
;
825 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
826 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
827 // size for each CPU in the platform
829 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
830 mCpuHotPlugData
.SmBase
[Index
] = (UINTN
)Buffer
+ Index
* TileSize
- SMM_HANDLER_OFFSET
;
831 gSmmCpuPrivate
->CpuSaveStateSize
[Index
] = sizeof(SMRAM_SAVE_STATE_MAP
);
832 gSmmCpuPrivate
->CpuSaveState
[Index
] = (VOID
*)(mCpuHotPlugData
.SmBase
[Index
] + SMRAM_SAVE_STATE_MAP_OFFSET
);
833 gSmmCpuPrivate
->Operation
[Index
] = SmmCpuNone
;
835 if (Index
< mNumberOfCpus
) {
836 Status
= MpServices
->GetProcessorInfo (MpServices
, Index
, &gSmmCpuPrivate
->ProcessorInfo
[Index
]);
837 ASSERT_EFI_ERROR (Status
);
838 mCpuHotPlugData
.ApicId
[Index
] = gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
;
840 DEBUG ((EFI_D_INFO
, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
842 (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
,
843 mCpuHotPlugData
.SmBase
[Index
],
844 gSmmCpuPrivate
->CpuSaveState
[Index
],
845 gSmmCpuPrivate
->CpuSaveStateSize
[Index
]
848 gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
= INVALID_APIC_ID
;
849 mCpuHotPlugData
.ApicId
[Index
] = INVALID_APIC_ID
;
854 // Allocate SMI stacks for all processors.
856 mSmmStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize
)));
857 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
859 // 2 more pages is allocated for each processor.
860 // one is guard page and the other is known good stack.
862 // +-------------------------------------------+-----+-------------------------------------------+
863 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
864 // +-------------------------------------------+-----+-------------------------------------------+
866 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
868 mSmmStackSize
+= EFI_PAGES_TO_SIZE (2);
871 mSmmShadowStackSize
= 0;
872 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
874 // Append Shadow Stack after normal stack
877 // +--------------------------------------------------+---------------------------------------------------------------+
878 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
879 // +--------------------------------------------------+---------------------------------------------------------------+
880 // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|
881 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
883 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
885 mSmmShadowStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize
)));
886 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
887 mSmmShadowStackSize
+= EFI_PAGES_TO_SIZE (2);
891 Stacks
= (UINT8
*) AllocatePages (gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (EFI_SIZE_TO_PAGES (mSmmStackSize
+ mSmmShadowStackSize
)));
892 ASSERT (Stacks
!= NULL
);
893 mSmmStackArrayBase
= (UINTN
)Stacks
;
894 mSmmStackArrayEnd
= mSmmStackArrayBase
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (mSmmStackSize
+ mSmmShadowStackSize
) - 1;
896 DEBUG ((DEBUG_INFO
, "Stacks - 0x%x\n", Stacks
));
897 DEBUG ((DEBUG_INFO
, "mSmmStackSize - 0x%x\n", mSmmStackSize
));
898 DEBUG ((DEBUG_INFO
, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard
)));
899 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
900 DEBUG ((DEBUG_INFO
, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize
));
904 // Set SMI stack for SMM base relocation
906 PatchInstructionX86 (
908 (UINTN
) (Stacks
+ mSmmStackSize
- sizeof (UINTN
)),
918 // Relocate SMM Base addresses to the ones allocated from SMRAM
920 mRebased
= (BOOLEAN
*)AllocateZeroPool (sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
921 ASSERT (mRebased
!= NULL
);
925 // Call hook for BSP to perform extra actions in normal mode after all
926 // SMM base addresses have been relocated on all CPUs
928 SmmCpuFeaturesSmmRelocationComplete ();
930 DEBUG ((DEBUG_INFO
, "mXdSupported - 0x%x\n", mXdSupported
));
933 // SMM Time initialization
935 InitializeSmmTimer ();
938 // Initialize MP globals
940 Cr3
= InitializeMpServiceData (Stacks
, mSmmStackSize
, mSmmShadowStackSize
);
942 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask
) != 0) && mCetSupported
) {
943 for (Index
= 0; Index
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; Index
++) {
946 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
949 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
952 (EFI_PHYSICAL_ADDRESS
)(UINTN
)Stacks
+ mSmmStackSize
+ EFI_PAGES_TO_SIZE(1) + (mSmmStackSize
+ mSmmShadowStackSize
) * Index
,
960 // Fill in SMM Reserved Regions
962 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedStart
= 0;
963 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedSize
= 0;
966 // Install the SMM Configuration Protocol onto a new handle on the handle database.
967 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
968 // to an SMRAM address will be present in the handle database
970 Status
= SystemTable
->BootServices
->InstallMultipleProtocolInterfaces (
971 &gSmmCpuPrivate
->SmmCpuHandle
,
972 &gEfiSmmConfigurationProtocolGuid
, &gSmmCpuPrivate
->SmmConfiguration
,
975 ASSERT_EFI_ERROR (Status
);
978 // Install the SMM CPU Protocol into SMM protocol database
980 Status
= gSmst
->SmmInstallProtocolInterface (
982 &gEfiSmmCpuProtocolGuid
,
983 EFI_NATIVE_INTERFACE
,
986 ASSERT_EFI_ERROR (Status
);
989 // Install the SMM Memory Attribute Protocol into SMM protocol database
991 Status
= gSmst
->SmmInstallProtocolInterface (
993 &gEdkiiSmmMemoryAttributeProtocolGuid
,
994 EFI_NATIVE_INTERFACE
,
997 ASSERT_EFI_ERROR (Status
);
1000 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1002 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
1003 Status
= PcdSet64S (PcdCpuHotPlugDataAddress
, (UINT64
)(UINTN
)&mCpuHotPlugData
);
1004 ASSERT_EFI_ERROR (Status
);
1008 // Initialize SMM CPU Services Support
1010 Status
= InitializeSmmCpuServices (mSmmCpuHandle
);
1011 ASSERT_EFI_ERROR (Status
);
1014 // register SMM Ready To Lock Protocol notification
1016 Status
= gSmst
->SmmRegisterProtocolNotify (
1017 &gEfiSmmReadyToLockProtocolGuid
,
1018 SmmReadyToLockEventNotify
,
1021 ASSERT_EFI_ERROR (Status
);
1024 // Initialize SMM Profile feature
1026 InitSmmProfile (Cr3
);
1028 GetAcpiS3EnableFlag ();
1029 InitSmmS3ResumeState (Cr3
);
1031 DEBUG ((EFI_D_INFO
, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1038 Find out SMRAM information including SMRR base and SMRR size.
1040 @param SmrrBase SMRR base
1041 @param SmrrSize SMRR size
1046 OUT UINT32
*SmrrBase
,
1047 OUT UINT32
*SmrrSize
1052 EFI_SMM_ACCESS2_PROTOCOL
*SmmAccess
;
1053 EFI_SMRAM_DESCRIPTOR
*CurrentSmramRange
;
1059 // Get SMM Access Protocol
1061 Status
= gBS
->LocateProtocol (&gEfiSmmAccess2ProtocolGuid
, NULL
, (VOID
**)&SmmAccess
);
1062 ASSERT_EFI_ERROR (Status
);
1065 // Get SMRAM information
1068 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, NULL
);
1069 ASSERT (Status
== EFI_BUFFER_TOO_SMALL
);
1071 mSmmCpuSmramRanges
= (EFI_SMRAM_DESCRIPTOR
*)AllocatePool (Size
);
1072 ASSERT (mSmmCpuSmramRanges
!= NULL
);
1074 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, mSmmCpuSmramRanges
);
1075 ASSERT_EFI_ERROR (Status
);
1077 mSmmCpuSmramRangeCount
= Size
/ sizeof (EFI_SMRAM_DESCRIPTOR
);
1080 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1082 CurrentSmramRange
= NULL
;
1083 for (Index
= 0, MaxSize
= SIZE_256KB
- EFI_PAGE_SIZE
; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1085 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1087 if ((mSmmCpuSmramRanges
[Index
].RegionState
& (EFI_ALLOCATED
| EFI_NEEDS_TESTING
| EFI_NEEDS_ECC_INITIALIZATION
)) != 0) {
1091 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= BASE_1MB
) {
1092 if ((mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) <= SMRR_MAX_ADDRESS
) {
1093 if (mSmmCpuSmramRanges
[Index
].PhysicalSize
>= MaxSize
) {
1094 MaxSize
= mSmmCpuSmramRanges
[Index
].PhysicalSize
;
1095 CurrentSmramRange
= &mSmmCpuSmramRanges
[Index
];
1101 ASSERT (CurrentSmramRange
!= NULL
);
1103 *SmrrBase
= (UINT32
)CurrentSmramRange
->CpuStart
;
1104 *SmrrSize
= (UINT32
)CurrentSmramRange
->PhysicalSize
;
1108 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1109 if (mSmmCpuSmramRanges
[Index
].CpuStart
< *SmrrBase
&&
1110 *SmrrBase
== (mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
)) {
1111 *SmrrBase
= (UINT32
)mSmmCpuSmramRanges
[Index
].CpuStart
;
1112 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1114 } else if ((*SmrrBase
+ *SmrrSize
) == mSmmCpuSmramRanges
[Index
].CpuStart
&& mSmmCpuSmramRanges
[Index
].PhysicalSize
> 0) {
1115 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1121 DEBUG ((EFI_D_INFO
, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase
, *SmrrSize
));
1125 Configure SMM Code Access Check feature on an AP.
1126 SMM Feature Control MSR will be locked after configuration.
1128 @param[in,out] Buffer Pointer to private data buffer.
1132 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1137 UINT64 SmmFeatureControlMsr
;
1138 UINT64 NewSmmFeatureControlMsr
;
1141 // Retrieve the CPU Index from the context passed in
1143 CpuIndex
= *(UINTN
*)Buffer
;
1146 // Get the current SMM Feature Control MSR value
1148 SmmFeatureControlMsr
= SmmCpuFeaturesGetSmmRegister (CpuIndex
, SmmRegFeatureControl
);
1151 // Compute the new SMM Feature Control MSR value
1153 NewSmmFeatureControlMsr
= SmmFeatureControlMsr
;
1154 if (mSmmCodeAccessCheckEnable
) {
1155 NewSmmFeatureControlMsr
|= SMM_CODE_CHK_EN_BIT
;
1156 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock
)) {
1157 NewSmmFeatureControlMsr
|= SMM_FEATURE_CONTROL_LOCK_BIT
;
1162 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1164 if (NewSmmFeatureControlMsr
!= SmmFeatureControlMsr
) {
1165 SmmCpuFeaturesSetSmmRegister (CpuIndex
, SmmRegFeatureControl
, NewSmmFeatureControlMsr
);
1169 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1171 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1175 Configure SMM Code Access Check feature for all processors.
1176 SMM Feature Control MSR will be locked after configuration.
1179 ConfigSmmCodeAccessCheck (
1187 // Check to see if the Feature Control MSR is supported on this CPU
1189 Index
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
1190 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index
, SmmRegFeatureControl
)) {
1191 mSmmCodeAccessCheckEnable
= FALSE
;
1196 // Check to see if the CPU supports the SMM Code Access Check feature
1197 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1199 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP
) & SMM_CODE_ACCESS_CHK_BIT
) == 0) {
1200 mSmmCodeAccessCheckEnable
= FALSE
;
1205 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1207 InitializeSpinLock (mConfigSmmCodeAccessCheckLock
);
1210 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1211 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1213 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1216 // Enable SMM Code Access Check feature on the BSP.
1218 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index
);
1221 // Enable SMM Code Access Check feature for the APs.
1223 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1224 if (Index
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1225 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== INVALID_APIC_ID
) {
1227 // If this processor does not exist
1232 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1233 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1235 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1238 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1240 Status
= gSmst
->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor
, Index
, &Index
);
1241 ASSERT_EFI_ERROR (Status
);
1244 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1246 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock
)) {
1251 // Release the Config SMM Code Access Check spin lock.
1253 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1259 This API provides a way to allocate memory for page table.
1261 This API can be called more once to allocate memory for page tables.
1263 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1264 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1265 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1268 @param Pages The number of 4 KB pages to allocate.
1270 @return A pointer to the allocated buffer or NULL if allocation fails.
1274 AllocatePageTableMemory (
1280 Buffer
= SmmCpuFeaturesAllocatePageTableMemory (Pages
);
1281 if (Buffer
!= NULL
) {
1284 return AllocatePages (Pages
);
1288 Allocate pages for code.
1290 @param[in] Pages Number of pages to be allocated.
1292 @return Allocated memory.
1300 EFI_PHYSICAL_ADDRESS Memory
;
1306 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1307 if (EFI_ERROR (Status
)) {
1310 return (VOID
*) (UINTN
) Memory
;
1314 Allocate aligned pages for code.
1316 @param[in] Pages Number of pages to be allocated.
1317 @param[in] Alignment The requested alignment of the allocation.
1318 Must be a power of two.
1319 If Alignment is zero, then byte alignment is used.
1321 @return Allocated memory.
1324 AllocateAlignedCodePages (
1330 EFI_PHYSICAL_ADDRESS Memory
;
1331 UINTN AlignedMemory
;
1332 UINTN AlignmentMask
;
1333 UINTN UnalignedPages
;
1337 // Alignment must be a power of two or zero.
1339 ASSERT ((Alignment
& (Alignment
- 1)) == 0);
1344 if (Alignment
> EFI_PAGE_SIZE
) {
1346 // Calculate the total number of pages since alignment is larger than page size.
1348 AlignmentMask
= Alignment
- 1;
1349 RealPages
= Pages
+ EFI_SIZE_TO_PAGES (Alignment
);
1351 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1353 ASSERT (RealPages
> Pages
);
1355 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, RealPages
, &Memory
);
1356 if (EFI_ERROR (Status
)) {
1359 AlignedMemory
= ((UINTN
) Memory
+ AlignmentMask
) & ~AlignmentMask
;
1360 UnalignedPages
= EFI_SIZE_TO_PAGES (AlignedMemory
- (UINTN
) Memory
);
1361 if (UnalignedPages
> 0) {
1363 // Free first unaligned page(s).
1365 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1366 ASSERT_EFI_ERROR (Status
);
1368 Memory
= AlignedMemory
+ EFI_PAGES_TO_SIZE (Pages
);
1369 UnalignedPages
= RealPages
- Pages
- UnalignedPages
;
1370 if (UnalignedPages
> 0) {
1372 // Free last unaligned page(s).
1374 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1375 ASSERT_EFI_ERROR (Status
);
1379 // Do not over-allocate pages in this case.
1381 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1382 if (EFI_ERROR (Status
)) {
1385 AlignedMemory
= (UINTN
) Memory
;
1387 return (VOID
*) AlignedMemory
;
1391 Perform the remaining tasks.
1395 PerformRemainingTasks (
1399 if (mSmmReadyToLock
) {
1401 // Start SMM Profile feature
1403 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1407 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1412 // Mark critical region to be read-only in page table
1414 SetMemMapAttributes ();
1417 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1419 SetUefiMemMapAttributes ();
1422 // Set page table itself to be read-only
1424 SetPageTableAttributes ();
1427 // Configure SMM Code Access Check feature if available.
1429 ConfigSmmCodeAccessCheck ();
1431 SmmCpuFeaturesCompleteSmmReadyToLock ();
1434 // Clean SMM ready to lock flag
1436 mSmmReadyToLock
= FALSE
;
1441 Perform the pre tasks.
1449 RestoreSmmConfigurationInS3 ();