2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
4 Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
17 #include "PiSmmCpuDxeSmm.h"
//
// NOTE(review): this excerpt is missing several lines (the fused original
// line numbers skip 25, 31, 37-39, 42-43), so some initializer entries and
// the closing "};" are not visible here — confirm against the full source.
//
20 // SMM CPU Private Data structure that contains SMM Configuration Protocol
21 // along its supporting fields.
23 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData
= {
24 SMM_CPU_PRIVATE_DATA_SIGNATURE
, // Signature
26 NULL
, // Pointer to ProcessorInfo array
27 NULL
, // Pointer to Operation array
28 NULL
, // Pointer to CpuSaveStateSize array
29 NULL
, // Pointer to CpuSaveState array
30 { {0} }, // SmmReservedSmramRegion
32 SmmStartupThisAp
, // SmmCoreEntryContext.SmmStartupThisAp
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
34 0, // SmmCoreEntryContext.NumberOfCpus
35 NULL
, // SmmCoreEntryContext.CpuSaveStateSize
36 NULL
// SmmCoreEntryContext.CpuSaveState
// The SmramReservedRegions pointer below refers back into this same
// structure's SmmReservedSmramRegion array (self-referential initializer).
40 mSmmCpuPrivateData
.SmmReservedSmramRegion
, // SmmConfiguration.SmramReservedRegions
41 RegisterSmmEntry
// SmmConfiguration.RegisterSmmEntry
//
// CPU hot-plug bookkeeping: per-CPU APIC ID and SMBASE arrays are allocated
// later at entry-point time (see the AllocatePool calls in the driver entry).
// NOTE(review): the trailing initializer fields and closing "};" (original
// lines 50-53) are missing from this excerpt.
//
45 CPU_HOT_PLUG_DATA mCpuHotPlugData
= {
46 CPU_HOT_PLUG_DATA_REVISION_1
, // Revision
47 0, // Array Length of SmBase and APIC ID
48 NULL
, // Pointer to APIC ID array
49 NULL
, // Pointer to SMBASE array
56 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
58 SMM_CPU_PRIVATE_DATA
*gSmmCpuPrivate
= &mSmmCpuPrivateData
;
61 // SMM Relocation variables
// mRebased: one flag per CPU, set from the SMI handler side when that CPU's
// SMBASE relocation completes; polled by SmmRelocateBases. Volatile because
// it is written in SMM context and spin-read outside it.
63 volatile BOOLEAN
*mRebased
;
// mIsBsp: volatile flag distinguishing the BSP during relocation.
64 volatile BOOLEAN mIsBsp
;
67 /// Handle for the SMM CPU Protocol
69 EFI_HANDLE mSmmCpuHandle
= NULL
;
72 /// SMM CPU Protocol instance
// NOTE(review): the member initializers and closing "};" of mSmmCpu
// (original lines 75-78) are missing from this excerpt.
74 EFI_SMM_CPU_PROTOCOL mSmmCpu
= {
80 /// SMM Memory Attribute Protocol instance
// NOTE(review): the closing "};" (original line 86) is missing here.
82 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute
= {
83 EdkiiSmmGetMemoryAttributes
,
84 EdkiiSmmSetMemoryAttributes
,
85 EdkiiSmmClearMemoryAttributes
// Table of externally registered exception handlers, one slot per vector.
88 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable
[EXCEPTION_VECTOR_NUMBER
];
91 // SMM stack information
// Base/end of the per-CPU SMI stack array; set up in the driver entry point.
93 UINTN mSmmStackArrayBase
;
94 UINTN mSmmStackArrayEnd
;
// Defaults assume a uniprocessor system until MP Services reports otherwise.
97 UINTN mMaxNumberOfCpus
= 1;
98 UINTN mNumberOfCpus
= 1;
101 // SMM ready to lock flag
103 BOOLEAN mSmmReadyToLock
= FALSE
;
106 // Global used to cache PCD for SMM Code Access Check enable
108 BOOLEAN mSmmCodeAccessCheckEnable
= FALSE
;
111 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
113 UINT64 mAddressEncMask
= 0;
116 // Spin lock used to serialize setting of SMM Code Access Check feature
118 SPIN_LOCK
*mConfigSmmCodeAccessCheckLock
= NULL
;
121 // Saved SMM ranges information
123 EFI_SMRAM_DESCRIPTOR
*mSmmCpuSmramRanges
;
124 UINTN mSmmCpuSmramRangeCount
;
// Number of physical address bits supported; NOTE(review): where this is
// assigned is not visible in this excerpt.
126 UINT8 mPhysicalAddressBits
;
129 // Control register contents saved for SMM S3 resume state initialization.
//
// Initialize the SMM IDT (gcSmiIdtr) with default exception handlers.
// Flow visible below: size the IDT for 32 processor exceptions, allocate a
// page-aligned code-page buffer for it, temporarily swap it in while
// InitializeCpuExceptionHandlers() populates it, then restore the DXE IDT
// and the interrupt state.
// NOTE(review): the function signature and the EFI_STATUS Status declaration
// (original lines 136-143) are missing from this excerpt.
//
135 Initialize IDT to setup exception handlers for SMM.
144 BOOLEAN InterruptState
;
145 IA32_DESCRIPTOR DxeIdtr
;
148 // There are 32 (not 255) entries in it since only processor
149 // generated exceptions will be handled.
151 gcSmiIdtr
.Limit
= (sizeof(IA32_IDT_GATE_DESCRIPTOR
) * 32) - 1;
153 // Allocate page aligned IDT, because it might be set as read only.
155 gcSmiIdtr
.Base
= (UINTN
)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr
.Limit
+ 1));
156 ASSERT (gcSmiIdtr
.Base
!= 0);
157 ZeroMem ((VOID
*)gcSmiIdtr
.Base
, gcSmiIdtr
.Limit
+ 1);
160 // Disable Interrupt and save DXE IDT table
162 InterruptState
= SaveAndDisableInterrupts ();
163 AsmReadIdtr (&DxeIdtr
);
165 // Load SMM temporary IDT table
167 AsmWriteIdtr (&gcSmiIdtr
);
169 // Setup SMM default exception handlers, SMM IDT table
170 // will be updated and saved in gcSmiIdtr
172 Status
= InitializeCpuExceptionHandlers (NULL
);
173 ASSERT_EFI_ERROR (Status
);
175 // Restore DXE IDT table and CPU interrupt
177 AsmWriteIdtr ((IA32_DESCRIPTOR
*) &DxeIdtr
);
178 SetInterruptState (InterruptState
);
//
// Diagnostic helper: given an instruction pointer, locate the containing
// PE/COFF image base and print the module's PDB name (if present) to the
// error log. NOTE(review): the function signature, local declarations for
// Pe32Data/PdbPointer, and the closing brace are missing from this excerpt.
//
182 Search module name by input IP address and output it.
184 @param CallerIpAddress Caller instruction pointer.
189 IN UINTN CallerIpAddress
198 Pe32Data
= PeCoffSearchImageBase (CallerIpAddress
);
200 DEBUG ((DEBUG_ERROR
, "It is invoked from the instruction before IP(0x%p)", (VOID
*) CallerIpAddress
));
201 PdbPointer
= PeCoffLoaderGetPdbPointer ((VOID
*) Pe32Data
);
202 if (PdbPointer
!= NULL
) {
203 DEBUG ((DEBUG_ERROR
, " in module (%a)\n", PdbPointer
));
//
// EFI_SMM_CPU_PROTOCOL.ReadSaveState implementation. Validates CpuIndex and
// Buffer, special-cases the PROCESSOR_ID pseudo-register, then delegates to
// the CPU-features library and falls back to the generic reader when the
// library returns EFI_UNSUPPORTED. NOTE(review): the function signature lines
// and some parameter declarations (Width, CpuIndex, Buffer) plus closing
// braces are missing from this excerpt.
//
209 Read information from the CPU save state.
211 @param This EFI_SMM_CPU_PROTOCOL instance
212 @param Width The number of bytes to read from the CPU save state.
213 @param Register Specifies the CPU register to read from the save state.
214 @param CpuIndex Specifies the zero-based index of the CPU save state.
215 @param Buffer Upon return, this holds the CPU register value read from the save state.
217 @retval EFI_SUCCESS The register was read from Save State
218 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
219 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
225 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
227 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
235 // Retrieve pointer to the specified CPU's SMM Save State buffer
237 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
238 return EFI_INVALID_PARAMETER
;
242 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
244 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
246 // The pseudo-register only supports the 64-bit size specified by Width.
248 if (Width
!= sizeof (UINT64
)) {
249 return EFI_INVALID_PARAMETER
;
252 // If the processor is in SMM at the time the SMI occurred,
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
254 // Otherwise, EFI_NOT_FOUND is returned.
256 if (*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
)) {
257 *(UINT64
*)Buffer
= gSmmCpuPrivate
->ProcessorInfo
[CpuIndex
].ProcessorId
;
260 return EFI_NOT_FOUND
;
// Reject reads for CPUs that are not currently present in SMM.
264 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
265 return EFI_INVALID_PARAMETER
;
// Platform/CPU-specific library gets first chance; fall back to the
// generic save-state reader on EFI_UNSUPPORTED.
268 Status
= SmmCpuFeaturesReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
269 if (Status
== EFI_UNSUPPORTED
) {
270 Status
= ReadSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
//
// EFI_SMM_CPU_PROTOCOL.WriteSaveState implementation. Mirrors the read path:
// validates CpuIndex/Buffer, silently ignores writes to the PROCESSOR_ID
// pseudo-register, then delegates to the CPU-features library with a generic
// fallback on EFI_UNSUPPORTED. NOTE(review): signature lines, the Width and
// CpuIndex parameter declarations, and closing braces are missing from this
// excerpt.
//
276 Write data to the CPU save state.
278 @param This EFI_SMM_CPU_PROTOCOL instance
279 @param Width The number of bytes to read from the CPU save state.
280 @param Register Specifies the CPU register to write to the save state.
281 @param CpuIndex Specifies the zero-based index of the CPU save state
282 @param Buffer Upon entry, this holds the new CPU register value.
284 @retval EFI_SUCCESS The register was written from Save State
285 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
286 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
292 IN CONST EFI_SMM_CPU_PROTOCOL
*This
,
294 IN EFI_SMM_SAVE_STATE_REGISTER Register
,
296 IN CONST VOID
*Buffer
302 // Retrieve pointer to the specified CPU's SMM Save State buffer
304 if ((CpuIndex
>= gSmst
->NumberOfCpus
) || (Buffer
== NULL
)) {
305 return EFI_INVALID_PARAMETER
;
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
311 if (Register
== EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
) {
// Reject writes for CPUs that are not currently present in SMM.
315 if (!mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) {
316 return EFI_INVALID_PARAMETER
;
// CPU-features library first; generic writer on EFI_UNSUPPORTED.
319 Status
= SmmCpuFeaturesWriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
320 if (Status
== EFI_UNSUPPORTED
) {
321 Status
= WriteSaveStateRegister (CpuIndex
, Register
, Width
, Buffer
);
//
// C-level SMI relocation handler. Runs in SMM on each CPU's first SMI:
// loads the SMM IDT, finds the current CPU by APIC ID, initializes SMM
// features for it, and hooks the RSM return to flag relocation complete via
// mRebased[Index]. NOTE(review): this excerpt is heavily gapped — the
// signature, local declarations, several call arguments (original lines
// 354-361), the BSP/S3 branch structure, and closing braces are missing.
//
328 C function for SMI handler. To change all processor's SMMBase Register.
341 // Update SMM IDT entries' code segment and load IDT
343 AsmWriteIdtr (&gcSmiIdtr
);
344 ApicId
= GetApicId ();
346 ASSERT (mNumberOfCpus
<= mMaxNumberOfCpus
);
348 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
349 if (ApicId
== (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
351 // Initialize SMM specific features on the currently executing CPU
353 SmmCpuFeaturesInitializeProcessor (
356 gSmmCpuPrivate
->ProcessorInfo
,
362 // Check XD and BTS features on each processor on normal boot
364 CheckFeatureSupported ();
369 // BSP rebase is already done above.
370 // Initialize private data during S3 resume
372 InitializeMpSyncData ();
376 // Hook return after RSM to set SMM re-based flag
378 SemaphoreHook (Index
, &mRebased
[Index
]);
//
// Relocate the SMBASE of every processor away from the default 0x38000.
// Visible flow: patch the relocation stub (gcSmmInitTemplate) with current
// CR0/CR3/CR4 and GDTR, back up the memory at the default SMBASE, copy the
// stub there, send an SMI IPI to each AP in turn and spin on mRebased[Index],
// then relocate the BSP last and restore the original contents.
// NOTE(review): the function signature, several local declarations (U8Ptr,
// ApicId, Index, BspIndex), and the BSP-relocation statements (original
// lines 460-477) are missing from this excerpt.
//
387 Relocate SmmBases for each processor.
389 Execute on first boot and all S3 resumes
398 UINT8 BakBuf
[BACK_BUF_SIZE
];
399 SMRAM_SAVE_STATE_MAP BakBuf2
;
400 SMRAM_SAVE_STATE_MAP
*CpuStatePtr
;
407 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
409 ASSERT (sizeof (BakBuf
) >= gcSmmInitSize
);
412 // Patch ASM code template with current CR0, CR3, and CR4 values
414 mSmmCr0
= (UINT32
)AsmReadCr0 ();
415 PatchInstructionX86 (gPatchSmmCr0
, mSmmCr0
, 4);
416 PatchInstructionX86 (gPatchSmmCr3
, AsmReadCr3 (), 4);
417 mSmmCr4
= (UINT32
)AsmReadCr4 ();
418 PatchInstructionX86 (gPatchSmmCr4
, mSmmCr4
, 4);
421 // Patch GDTR for SMM base relocation
423 gcSmiInitGdtr
.Base
= gcSmiGdtr
.Base
;
424 gcSmiInitGdtr
.Limit
= gcSmiGdtr
.Limit
;
426 U8Ptr
= (UINT8
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMM_HANDLER_OFFSET
);
427 CpuStatePtr
= (SMRAM_SAVE_STATE_MAP
*)(UINTN
)(SMM_DEFAULT_SMBASE
+ SMRAM_SAVE_STATE_MAP_OFFSET
);
430 // Backup original contents at address 0x38000
432 CopyMem (BakBuf
, U8Ptr
, sizeof (BakBuf
));
433 CopyMem (&BakBuf2
, CpuStatePtr
, sizeof (BakBuf2
));
436 // Load image for relocation
438 CopyMem (U8Ptr
, gcSmmInitTemplate
, gcSmmInitSize
);
441 // Retrieve the local APIC ID of current processor
443 ApicId
= GetApicId ();
446 // Relocate SM bases for all APs
447 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
450 BspIndex
= (UINTN
)-1;
451 for (Index
= 0; Index
< mNumberOfCpus
; Index
++) {
452 mRebased
[Index
] = FALSE
;
453 if (ApicId
!= (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
) {
454 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
456 // Wait for this AP to finish its 1st SMI
// Busy-wait: mRebased[Index] is set from the SMI handler side (see
// SemaphoreHook in SmmInitHandler).
458 while (!mRebased
[Index
]);
461 // BSP will be Relocated later
468 // Relocate BSP's SMM base
470 ASSERT (BspIndex
!= (UINTN
)-1);
474 // Wait for the BSP to finish its 1st SMI
476 while (!mRebased
[BspIndex
]);
479 // Restore contents at address 0x38000
481 CopyMem (CpuStatePtr
, &BakBuf2
, sizeof (BakBuf2
));
482 CopyMem (U8Ptr
, BakBuf
, sizeof (BakBuf
));
//
// Protocol-notify callback for gEfiSmmReadyToLockProtocolGuid: sets the
// module-level mSmmReadyToLock flag so lock actions run on the next SMI.
// NOTE(review): the Interface/Handle parameters, the memory-map caching
// statements (original lines 509-512), and the return are missing from this
// excerpt.
//
486 SMM Ready To Lock event notification handler.
488 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
489 perform additional lock actions that must be performed from SMM on the next SMI.
491 @param[in] Protocol Points to the protocol's unique identifier.
492 @param[in] Interface Points to the interface instance.
493 @param[in] Handle The handle on which the interface was installed.
495 @retval EFI_SUCCESS Notification handler runs successfully.
499 SmmReadyToLockEventNotify (
500 IN CONST EFI_GUID
*Protocol
,
508 // Cache a copy of UEFI memory map before we start profiling feature.
513 // Set SMM ready to lock flag and return
515 mSmmReadyToLock
= TRUE
;
//
// Driver entry point. Visible flow: fix up addresses, init the debug agent,
// discover SMRR base/size, query MP Services for CPU counts, cache PCDs,
// compute the per-CPU tile size, allocate the tiled SMBASE buffer and the
// per-CPU bookkeeping arrays, allocate SMI stacks, relocate SMBASEs, init
// timers/MP data, install the SMM Configuration / SMM CPU / Memory Attribute
// protocols, register the ReadyToLock notify, and init the SMM profile and
// S3 resume state. NOTE(review): this excerpt omits many lines — the
// function signature body locals (Status, Buffer, BufferPages, TileSize,
// FamilyId, ModelId, RegEax, RegEdx, Cr3, Stacks, Index), several call
// argument lists, and closing braces — so statement grouping below should be
// confirmed against the full source.
//
520 The module Entry Point of the CPU SMM driver.
522 @param ImageHandle The firmware allocated handle for the EFI image.
523 @param SystemTable A pointer to the EFI System Table.
525 @retval EFI_SUCCESS The entry point is executed successfully.
526 @retval Other Some error occurs when executing this entry point.
532 IN EFI_HANDLE ImageHandle
,
533 IN EFI_SYSTEM_TABLE
*SystemTable
537 EFI_MP_SERVICES_PROTOCOL
*MpServices
;
538 UINTN NumberOfEnabledProcessors
;
554 // Initialize address fixup
556 PiSmmCpuSmmInitFixupAddress ();
557 PiSmmCpuSmiEntryFixupAddress ();
560 // Initialize Debug Agent to support source level debug in SMM code
562 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM
, NULL
, NULL
);
565 // Report the start of CPU SMM initialization.
569 EFI_COMPUTING_UNIT_HOST_PROCESSOR
| EFI_CU_HP_PC_SMM_INIT
573 // Find out SMRR Base and SMRR Size
575 FindSmramInfo (&mCpuHotPlugData
.SmrrBase
, &mCpuHotPlugData
.SmrrSize
);
578 // Get MP Services Protocol
580 Status
= SystemTable
->BootServices
->LocateProtocol (&gEfiMpServiceProtocolGuid
, NULL
, (VOID
**)&MpServices
);
581 ASSERT_EFI_ERROR (Status
);
584 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
586 Status
= MpServices
->GetNumberOfProcessors (MpServices
, &mNumberOfCpus
, &NumberOfEnabledProcessors
);
587 ASSERT_EFI_ERROR (Status
);
588 ASSERT (mNumberOfCpus
<= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
));
591 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
592 // A constant BSP index makes no sense because it may be hot removed.
595 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
597 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection
));
602 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
604 mSmmCodeAccessCheckEnable
= PcdGetBool (PcdCpuSmmCodeAccessCheckEnable
);
605 DEBUG ((EFI_D_INFO
, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable
));
608 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
609 // Make sure AddressEncMask is contained to smallest supported address field.
611 mAddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) & PAGING_1G_ADDRESS_MASK_64
;
612 DEBUG ((EFI_D_INFO
, "mAddressEncMask = 0x%lx\n", mAddressEncMask
));
615 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
617 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
618 mMaxNumberOfCpus
= PcdGet32 (PcdCpuMaxLogicalProcessorNumber
);
620 mMaxNumberOfCpus
= mNumberOfCpus
;
622 gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
= mMaxNumberOfCpus
;
625 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
626 // allocated buffer. The minimum size of this buffer for a uniprocessor system
627 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
628 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
629 // then the SMI entry point and the CPU save state areas can be tiles to minimize
630 // the total amount SMRAM required for all the CPUs. The tile size can be computed
631 // by adding the CPU save state size, any extra CPU specific context, and
632 // the size of code that must be placed at the SMI entry point to transfer
633 // control to a C function in the native SMM execution mode. This size is
634 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
635 // The total amount of memory required is the maximum number of CPUs that
636 // platform supports times the tile size. The picture below shows the tiling,
637 // where m is the number of tiles that fit in 32KB.
639 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
640 // | CPU m+1 Save State |
641 // +-----------------------------+
642 // | CPU m+1 Extra Data |
643 // +-----------------------------+
645 // +-----------------------------+
646 // | CPU 2m SMI Entry |
647 // +#############################+ <-- Base of allocated buffer + 64 KB
648 // | CPU m-1 Save State |
649 // +-----------------------------+
650 // | CPU m-1 Extra Data |
651 // +-----------------------------+
653 // +-----------------------------+
654 // | CPU 2m-1 SMI Entry |
655 // +=============================+ <-- 2^n offset from Base of allocated buffer
656 // | . . . . . . . . . . . . |
657 // +=============================+ <-- 2^n offset from Base of allocated buffer
658 // | CPU 2 Save State |
659 // +-----------------------------+
660 // | CPU 2 Extra Data |
661 // +-----------------------------+
663 // +-----------------------------+
664 // | CPU m+1 SMI Entry |
665 // +=============================+ <-- Base of allocated buffer + 32 KB
666 // | CPU 1 Save State |
667 // +-----------------------------+
668 // | CPU 1 Extra Data |
669 // +-----------------------------+
671 // +-----------------------------+
672 // | CPU m SMI Entry |
673 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
674 // | CPU 0 Save State |
675 // +-----------------------------+
676 // | CPU 0 Extra Data |
677 // +-----------------------------+
679 // +-----------------------------+
680 // | CPU m-1 SMI Entry |
681 // +=============================+ <-- 2^n offset from Base of allocated buffer
682 // | . . . . . . . . . . . . |
683 // +=============================+ <-- 2^n offset from Base of allocated buffer
685 // +-----------------------------+
686 // | CPU 1 SMI Entry |
687 // +=============================+ <-- 2^n offset from Base of allocated buffer
689 // +-----------------------------+
690 // | CPU 0 SMI Entry |
691 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
695 // Retrieve CPU Family
697 AsmCpuid (CPUID_VERSION_INFO
, &RegEax
, NULL
, NULL
, NULL
);
698 FamilyId
= (RegEax
>> 8) & 0xf;
699 ModelId
= (RegEax
>> 4) & 0xf;
700 if (FamilyId
== 0x06 || FamilyId
== 0x0f) {
// Fold the extended model bits (EAX[19:16]) into ModelId for family 6/15.
701 ModelId
= ModelId
| ((RegEax
>> 12) & 0xf0);
705 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
706 if (RegEax
>= CPUID_EXTENDED_CPU_SIG
) {
707 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
710 // Determine the mode of the CPU at the time an SMI occurs
711 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
712 // Volume 3C, Section 34.4.1.1
714 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
;
715 if ((RegEdx
& BIT29
) != 0) {
716 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
718 if (FamilyId
== 0x06) {
719 if (ModelId
== 0x17 || ModelId
== 0x0f || ModelId
== 0x1c) {
720 mSmmSaveStateRegisterLma
= EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT
;
725 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
726 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
727 // This size is rounded up to nearest power of 2.
729 TileCodeSize
= GetSmiHandlerSize ();
730 TileCodeSize
= ALIGN_VALUE(TileCodeSize
, SIZE_4KB
);
731 TileDataSize
= (SMRAM_SAVE_STATE_MAP_OFFSET
- SMM_PSD_OFFSET
) + sizeof (SMRAM_SAVE_STATE_MAP
);
732 TileDataSize
= ALIGN_VALUE(TileDataSize
, SIZE_4KB
);
733 TileSize
= TileDataSize
+ TileCodeSize
- 1;
734 TileSize
= 2 * GetPowerOfTwo32 ((UINT32
)TileSize
);
735 DEBUG ((EFI_D_INFO
, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize
, TileCodeSize
, TileDataSize
));
738 // If the TileSize is larger than space available for the SMI Handler of
739 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
740 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
741 // the SMI Handler size must be reduced or the size of the extra CPU specific
742 // context must be reduced.
744 ASSERT (TileSize
<= (SMRAM_SAVE_STATE_MAP_OFFSET
+ sizeof (SMRAM_SAVE_STATE_MAP
) - SMM_HANDLER_OFFSET
));
747 // Allocate buffer for all of the tiles.
749 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
750 // Volume 3C, Section 34.11 SMBASE Relocation
751 // For Pentium and Intel486 processors, the SMBASE values must be
752 // aligned on a 32-KByte boundary or the processor will enter shutdown
753 // state during the execution of a RSM instruction.
755 // Intel486 processors: FamilyId is 4
756 // Pentium processors : FamilyId is 5
758 BufferPages
= EFI_SIZE_TO_PAGES (SIZE_32KB
+ TileSize
* (mMaxNumberOfCpus
- 1));
759 if ((FamilyId
== 4) || (FamilyId
== 5)) {
760 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_32KB
);
762 Buffer
= AllocateAlignedCodePages (BufferPages
, SIZE_4KB
);
764 ASSERT (Buffer
!= NULL
);
765 DEBUG ((EFI_D_INFO
, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer
, EFI_PAGES_TO_SIZE(BufferPages
)));
768 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
770 gSmmCpuPrivate
->ProcessorInfo
= (EFI_PROCESSOR_INFORMATION
*)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION
) * mMaxNumberOfCpus
);
771 ASSERT (gSmmCpuPrivate
->ProcessorInfo
!= NULL
);
773 gSmmCpuPrivate
->Operation
= (SMM_CPU_OPERATION
*)AllocatePool (sizeof (SMM_CPU_OPERATION
) * mMaxNumberOfCpus
);
774 ASSERT (gSmmCpuPrivate
->Operation
!= NULL
);
776 gSmmCpuPrivate
->CpuSaveStateSize
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
777 ASSERT (gSmmCpuPrivate
->CpuSaveStateSize
!= NULL
);
779 gSmmCpuPrivate
->CpuSaveState
= (VOID
**)AllocatePool (sizeof (VOID
*) * mMaxNumberOfCpus
);
780 ASSERT (gSmmCpuPrivate
->CpuSaveState
!= NULL
);
// Publish the per-CPU save-state arrays through the SMM Core entry context.
782 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveStateSize
= gSmmCpuPrivate
->CpuSaveStateSize
;
783 mSmmCpuPrivateData
.SmmCoreEntryContext
.CpuSaveState
= gSmmCpuPrivate
->CpuSaveState
;
786 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
788 mCpuHotPlugData
.ApicId
= (UINT64
*)AllocatePool (sizeof (UINT64
) * mMaxNumberOfCpus
);
789 ASSERT (mCpuHotPlugData
.ApicId
!= NULL
);
790 mCpuHotPlugData
.SmBase
= (UINTN
*)AllocatePool (sizeof (UINTN
) * mMaxNumberOfCpus
);
791 ASSERT (mCpuHotPlugData
.SmBase
!= NULL
);
792 mCpuHotPlugData
.ArrayLength
= (UINT32
)mMaxNumberOfCpus
;
795 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
796 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
797 // size for each CPU in the platform
799 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
800 mCpuHotPlugData
.SmBase
[Index
] = (UINTN
)Buffer
+ Index
* TileSize
- SMM_HANDLER_OFFSET
;
801 gSmmCpuPrivate
->CpuSaveStateSize
[Index
] = sizeof(SMRAM_SAVE_STATE_MAP
);
802 gSmmCpuPrivate
->CpuSaveState
[Index
] = (VOID
*)(mCpuHotPlugData
.SmBase
[Index
] + SMRAM_SAVE_STATE_MAP_OFFSET
);
803 gSmmCpuPrivate
->Operation
[Index
] = SmmCpuNone
;
805 if (Index
< mNumberOfCpus
) {
806 Status
= MpServices
->GetProcessorInfo (MpServices
, Index
, &gSmmCpuPrivate
->ProcessorInfo
[Index
]);
807 ASSERT_EFI_ERROR (Status
);
808 mCpuHotPlugData
.ApicId
[Index
] = gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
;
810 DEBUG ((EFI_D_INFO
, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
812 (UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
,
813 mCpuHotPlugData
.SmBase
[Index
],
814 gSmmCpuPrivate
->CpuSaveState
[Index
],
815 gSmmCpuPrivate
->CpuSaveStateSize
[Index
]
// Slots beyond mNumberOfCpus are reserved for hot-added CPUs and are
// marked with INVALID_APIC_ID until populated.
818 gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
= INVALID_APIC_ID
;
819 mCpuHotPlugData
.ApicId
[Index
] = INVALID_APIC_ID
;
824 // Allocate SMI stacks for all processors.
826 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
828 // 2 more pages is allocated for each processor.
829 // one is guard page and the other is known good stack.
831 // +-------------------------------------------+-----+-------------------------------------------+
832 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
833 // +-------------------------------------------+-----+-------------------------------------------+
835 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
837 mSmmStackSize
= EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize
)) + 2);
838 Stacks
= (UINT8
*) AllocatePages (gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize
)) + 2));
839 ASSERT (Stacks
!= NULL
);
840 mSmmStackArrayBase
= (UINTN
)Stacks
;
841 mSmmStackArrayEnd
= mSmmStackArrayBase
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* mSmmStackSize
- 1;
843 mSmmStackSize
= PcdGet32 (PcdCpuSmmStackSize
);
844 Stacks
= (UINT8
*) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
* mSmmStackSize
));
845 ASSERT (Stacks
!= NULL
);
849 // Set SMI stack for SMM base relocation
851 PatchInstructionX86 (
853 (UINTN
) (Stacks
+ mSmmStackSize
- sizeof (UINTN
)),
863 // Relocate SMM Base addresses to the ones allocated from SMRAM
865 mRebased
= (BOOLEAN
*)AllocateZeroPool (sizeof (BOOLEAN
) * mMaxNumberOfCpus
);
866 ASSERT (mRebased
!= NULL
);
870 // Call hook for BSP to perform extra actions in normal mode after all
871 // SMM base addresses have been relocated on all CPUs
873 SmmCpuFeaturesSmmRelocationComplete ();
875 DEBUG ((DEBUG_INFO
, "mXdSupported - 0x%x\n", mXdSupported
));
878 // SMM Time initialization
880 InitializeSmmTimer ();
883 // Initialize MP globals
885 Cr3
= InitializeMpServiceData (Stacks
, mSmmStackSize
);
888 // Fill in SMM Reserved Regions
890 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedStart
= 0;
891 gSmmCpuPrivate
->SmmReservedSmramRegion
[0].SmramReservedSize
= 0;
894 // Install the SMM Configuration Protocol onto a new handle on the handle database.
895 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
896 // to an SMRAM address will be present in the handle database
898 Status
= SystemTable
->BootServices
->InstallMultipleProtocolInterfaces (
899 &gSmmCpuPrivate
->SmmCpuHandle
,
900 &gEfiSmmConfigurationProtocolGuid
, &gSmmCpuPrivate
->SmmConfiguration
,
903 ASSERT_EFI_ERROR (Status
);
906 // Install the SMM CPU Protocol into SMM protocol database
908 Status
= gSmst
->SmmInstallProtocolInterface (
910 &gEfiSmmCpuProtocolGuid
,
911 EFI_NATIVE_INTERFACE
,
914 ASSERT_EFI_ERROR (Status
);
917 // Install the SMM Memory Attribute Protocol into SMM protocol database
919 Status
= gSmst
->SmmInstallProtocolInterface (
921 &gEdkiiSmmMemoryAttributeProtocolGuid
,
922 EFI_NATIVE_INTERFACE
,
925 ASSERT_EFI_ERROR (Status
);
928 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
930 if (FeaturePcdGet (PcdCpuHotPlugSupport
)) {
931 Status
= PcdSet64S (PcdCpuHotPlugDataAddress
, (UINT64
)(UINTN
)&mCpuHotPlugData
);
932 ASSERT_EFI_ERROR (Status
);
936 // Initialize SMM CPU Services Support
938 Status
= InitializeSmmCpuServices (mSmmCpuHandle
);
939 ASSERT_EFI_ERROR (Status
);
942 // register SMM Ready To Lock Protocol notification
944 Status
= gSmst
->SmmRegisterProtocolNotify (
945 &gEfiSmmReadyToLockProtocolGuid
,
946 SmmReadyToLockEventNotify
,
949 ASSERT_EFI_ERROR (Status
);
952 // Initialize SMM Profile feature
954 InitSmmProfile (Cr3
);
956 GetAcpiS3EnableFlag ();
957 InitSmmS3ResumeState (Cr3
);
959 DEBUG ((EFI_D_INFO
, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
//
// Discover SMRAM layout via the SMM Access2 protocol, pick the largest
// usable SMRAM range between 1 MB and 4 GB as the SMRR window, then extend
// the window by merging physically adjacent ranges on either side.
// NOTE(review): the function signature's SmrrSize parameter, locals (Size,
// MaxSize, Index), and several closing braces are missing from this excerpt.
//
966 Find out SMRAM information including SMRR base and SMRR size.
968 @param SmrrBase SMRR base
969 @param SmrrSize SMRR size
974 OUT UINT32
*SmrrBase
,
980 EFI_SMM_ACCESS2_PROTOCOL
*SmmAccess
;
981 EFI_SMRAM_DESCRIPTOR
*CurrentSmramRange
;
987 // Get SMM Access Protocol
989 Status
= gBS
->LocateProtocol (&gEfiSmmAccess2ProtocolGuid
, NULL
, (VOID
**)&SmmAccess
);
990 ASSERT_EFI_ERROR (Status
);
993 // Get SMRAM information
// First call with a NULL buffer is expected to fail with BUFFER_TOO_SMALL
// and return the required Size.
996 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, NULL
);
997 ASSERT (Status
== EFI_BUFFER_TOO_SMALL
);
999 mSmmCpuSmramRanges
= (EFI_SMRAM_DESCRIPTOR
*)AllocatePool (Size
);
1000 ASSERT (mSmmCpuSmramRanges
!= NULL
);
1002 Status
= SmmAccess
->GetCapabilities (SmmAccess
, &Size
, mSmmCpuSmramRanges
);
1003 ASSERT_EFI_ERROR (Status
);
1005 mSmmCpuSmramRangeCount
= Size
/ sizeof (EFI_SMRAM_DESCRIPTOR
);
1008 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1010 CurrentSmramRange
= NULL
;
1011 for (Index
= 0, MaxSize
= SIZE_256KB
- EFI_PAGE_SIZE
; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1013 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1015 if ((mSmmCpuSmramRanges
[Index
].RegionState
& (EFI_ALLOCATED
| EFI_NEEDS_TESTING
| EFI_NEEDS_ECC_INITIALIZATION
)) != 0) {
1019 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= BASE_1MB
) {
1020 if ((mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) <= SMRR_MAX_ADDRESS
) {
1021 if (mSmmCpuSmramRanges
[Index
].PhysicalSize
>= MaxSize
) {
1022 MaxSize
= mSmmCpuSmramRanges
[Index
].PhysicalSize
;
1023 CurrentSmramRange
= &mSmmCpuSmramRanges
[Index
];
1029 ASSERT (CurrentSmramRange
!= NULL
);
1031 *SmrrBase
= (UINT32
)CurrentSmramRange
->CpuStart
;
1032 *SmrrSize
= (UINT32
)CurrentSmramRange
->PhysicalSize
;
// Merge adjacent SMRAM ranges into the SMRR window: a range ending exactly
// at *SmrrBase extends it downward; one starting at *SmrrBase + *SmrrSize
// extends it upward.
1036 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
1037 if (mSmmCpuSmramRanges
[Index
].CpuStart
< *SmrrBase
&&
1038 *SmrrBase
== (mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
)) {
1039 *SmrrBase
= (UINT32
)mSmmCpuSmramRanges
[Index
].CpuStart
;
1040 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1042 } else if ((*SmrrBase
+ *SmrrSize
) == mSmmCpuSmramRanges
[Index
].CpuStart
&& mSmmCpuSmramRanges
[Index
].PhysicalSize
> 0) {
1043 *SmrrSize
= (UINT32
)(*SmrrSize
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
);
1049 DEBUG ((EFI_D_INFO
, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase
, *SmrrSize
));
//
// Per-CPU worker (run via SmmStartupThisAp or directly on the BSP): reads
// the SMM Feature Control register, sets the code-check enable bit (and the
// lock bit when PcdCpuSmmFeatureControlMsrLock is set), writes it back only
// if changed, then releases mConfigSmmCodeAccessCheckLock to signal the
// caller. NOTE(review): the function signature, CpuIndex declaration, and
// closing braces are missing from this excerpt.
//
1053 Configure SMM Code Access Check feature on an AP.
1054 SMM Feature Control MSR will be locked after configuration.
1056 @param[in,out] Buffer Pointer to private data buffer.
1060 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1065 UINT64 SmmFeatureControlMsr
;
1066 UINT64 NewSmmFeatureControlMsr
;
1069 // Retrieve the CPU Index from the context passed in
1071 CpuIndex
= *(UINTN
*)Buffer
;
1074 // Get the current SMM Feature Control MSR value
1076 SmmFeatureControlMsr
= SmmCpuFeaturesGetSmmRegister (CpuIndex
, SmmRegFeatureControl
);
1079 // Compute the new SMM Feature Control MSR value
1081 NewSmmFeatureControlMsr
= SmmFeatureControlMsr
;
1082 if (mSmmCodeAccessCheckEnable
) {
1083 NewSmmFeatureControlMsr
|= SMM_CODE_CHK_EN_BIT
;
1084 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock
)) {
1085 NewSmmFeatureControlMsr
|= SMM_FEATURE_CONTROL_LOCK_BIT
;
1090 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1092 if (NewSmmFeatureControlMsr
!= SmmFeatureControlMsr
) {
1093 SmmCpuFeaturesSetSmmRegister (CpuIndex
, SmmRegFeatureControl
, NewSmmFeatureControlMsr
);
1097 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1099 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
//
// BSP-side driver for the code access check feature: verifies the feature
// is supported, then runs ConfigSmmCodeAccessCheckOnCurrentProcessor on the
// BSP and on each present AP. The spin lock is acquired before each start
// and released by the worker, so the loop waits for each AP to finish before
// proceeding. NOTE(review): the function signature, local declarations, and
// closing braces are missing from this excerpt; the early-return paths after
// clearing mSmmCodeAccessCheckEnable are implied but not visible.
//
1103 Configure SMM Code Access Check feature for all processors.
1104 SMM Feature Control MSR will be locked after configuration.
1107 ConfigSmmCodeAccessCheck (
1115 // Check to see if the Feature Control MSR is supported on this CPU
1117 Index
= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
;
1118 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index
, SmmRegFeatureControl
)) {
1119 mSmmCodeAccessCheckEnable
= FALSE
;
1124 // Check to see if the CPU supports the SMM Code Access Check feature
1125 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1127 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP
) & SMM_CODE_ACCESS_CHK_BIT
) == 0) {
1128 mSmmCodeAccessCheckEnable
= FALSE
;
1133 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1135 InitializeSpinLock (mConfigSmmCodeAccessCheckLock
);
1138 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1139 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1141 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1144 // Enable SMM Code Access Check feature on the BSP.
1146 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index
);
1149 // Enable SMM Code Access Check feature for the APs.
1151 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1152 if (Index
!= gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
1153 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== INVALID_APIC_ID
) {
1155 // If this processor does not exist
1160 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1161 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1163 AcquireSpinLock (mConfigSmmCodeAccessCheckLock
);
1166 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1168 Status
= gSmst
->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor
, Index
, &Index
);
1169 ASSERT_EFI_ERROR (Status
);
1172 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1174 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock
)) {
1179 // Release the Config SMM Code Access Check spin lock.
1181 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock
);
1187 This API provides a way to allocate memory for page table.
1189 This API can be called more once to allocate memory for page tables.
1191 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1192 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1193 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1196 @param Pages The number of 4 KB pages to allocate.
1198 @return A pointer to the allocated buffer or NULL if allocation fails.
1202 AllocatePageTableMemory (
1208 Buffer
= SmmCpuFeaturesAllocatePageTableMemory (Pages
);
1209 if (Buffer
!= NULL
) {
1212 return AllocatePages (Pages
);
1216 Allocate pages for code.
1218 @param[in] Pages Number of pages to be allocated.
1220 @return Allocated memory.
1228 EFI_PHYSICAL_ADDRESS Memory
;
1234 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1235 if (EFI_ERROR (Status
)) {
1238 return (VOID
*) (UINTN
) Memory
;
1242 Allocate aligned pages for code.
1244 @param[in] Pages Number of pages to be allocated.
1245 @param[in] Alignment The requested alignment of the allocation.
1246 Must be a power of two.
1247 If Alignment is zero, then byte alignment is used.
1249 @return Allocated memory.
1252 AllocateAlignedCodePages (
1258 EFI_PHYSICAL_ADDRESS Memory
;
1259 UINTN AlignedMemory
;
1260 UINTN AlignmentMask
;
1261 UINTN UnalignedPages
;
1265 // Alignment must be a power of two or zero.
1267 ASSERT ((Alignment
& (Alignment
- 1)) == 0);
1272 if (Alignment
> EFI_PAGE_SIZE
) {
1274 // Calculate the total number of pages since alignment is larger than page size.
1276 AlignmentMask
= Alignment
- 1;
1277 RealPages
= Pages
+ EFI_SIZE_TO_PAGES (Alignment
);
1279 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1281 ASSERT (RealPages
> Pages
);
1283 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, RealPages
, &Memory
);
1284 if (EFI_ERROR (Status
)) {
1287 AlignedMemory
= ((UINTN
) Memory
+ AlignmentMask
) & ~AlignmentMask
;
1288 UnalignedPages
= EFI_SIZE_TO_PAGES (AlignedMemory
- (UINTN
) Memory
);
1289 if (UnalignedPages
> 0) {
1291 // Free first unaligned page(s).
1293 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1294 ASSERT_EFI_ERROR (Status
);
1296 Memory
= AlignedMemory
+ EFI_PAGES_TO_SIZE (Pages
);
1297 UnalignedPages
= RealPages
- Pages
- UnalignedPages
;
1298 if (UnalignedPages
> 0) {
1300 // Free last unaligned page(s).
1302 Status
= gSmst
->SmmFreePages (Memory
, UnalignedPages
);
1303 ASSERT_EFI_ERROR (Status
);
1307 // Do not over-allocate pages in this case.
1309 Status
= gSmst
->SmmAllocatePages (AllocateAnyPages
, EfiRuntimeServicesCode
, Pages
, &Memory
);
1310 if (EFI_ERROR (Status
)) {
1313 AlignedMemory
= (UINTN
) Memory
;
1315 return (VOID
*) AlignedMemory
;
1319 Perform the remaining tasks.
1323 PerformRemainingTasks (
1327 if (mSmmReadyToLock
) {
1329 // Start SMM Profile feature
1331 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1335 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1340 // Mark critical region to be read-only in page table
1342 SetMemMapAttributes ();
1345 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1347 SetUefiMemMapAttributes ();
1350 // Set page table itself to be read-only
1352 SetPageTableAttributes ();
1355 // Configure SMM Code Access Check feature if available.
1357 ConfigSmmCodeAccessCheck ();
1359 SmmCpuFeaturesCompleteSmmReadyToLock ();
1362 // Clean SMM ready to lock flag
1364 mSmmReadyToLock
= FALSE
;
1369 Perform the pre tasks.
1377 RestoreSmmConfigurationInS3 ();