]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
//
// SMM CPU Private Data structure that contains the SMM Configuration Protocol
// along with its supporting fields. The pointer members are populated by
// PiCpuSmmEntry once the processor count is known.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};
42
//
// CPU hot plug data handed to SmmCpuFeaturesInitializeProcessor(). The
// APIC ID and SMBASE arrays are allocated in PiCpuSmmEntry, and the SMRR
// range is filled in by FindSmramInfo().
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};
52
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// One flag per CPU; set via SemaphoreHook once that CPU's SMBASE has been
// relocated (see SmmInitHandler / SmmRelocateBases).
//
volatile BOOLEAN  *mRebased;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// Table of registered interrupt handlers, one slot per exception vector.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

// TRUE when SMBASE relocation was already performed (gSmmBaseHobGuid found).
BOOLEAN           mSmmRelocated    = FALSE;
// Per-CPU flags recording completion of each processor's first SMI init.
volatile BOOLEAN  *mSmmInitialized = NULL;
// Local APIC ID of the BSP, captured before init/relocation SMIs are sent.
UINT32            mBspApicId       = 0;

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN    mSmmShadowStackSize;
// Assume CET is available until the CPUID probe in PiCpuSmmEntry says otherwise.
BOOLEAN  mCetSupported = TRUE;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

// Number of physical address bits supported by the processor.
UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;
138 /**
139 Initialize IDT to setup exception handlers for SMM.
140
141 **/
142 VOID
143 InitializeSmmIdt (
144 VOID
145 )
146 {
147 EFI_STATUS Status;
148 BOOLEAN InterruptState;
149 IA32_DESCRIPTOR DxeIdtr;
150
151 //
152 // There are 32 (not 255) entries in it since only processor
153 // generated exceptions will be handled.
154 //
155 gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
156 //
157 // Allocate page aligned IDT, because it might be set as read only.
158 //
159 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
160 ASSERT (gcSmiIdtr.Base != 0);
161 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
162
163 //
164 // Disable Interrupt and save DXE IDT table
165 //
166 InterruptState = SaveAndDisableInterrupts ();
167 AsmReadIdtr (&DxeIdtr);
168 //
169 // Load SMM temporary IDT table
170 //
171 AsmWriteIdtr (&gcSmiIdtr);
172 //
173 // Setup SMM default exception handlers, SMM IDT table
174 // will be updated and saved in gcSmiIdtr
175 //
176 Status = InitializeCpuExceptionHandlers (NULL);
177 ASSERT_EFI_ERROR (Status);
178 //
179 // Restore DXE IDT table and CPU interrupt
180 //
181 AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
182 SetInterruptState (InterruptState);
183 }
184
185 /**
186 Search module name by input IP address and output it.
187
188 @param CallerIpAddress Caller instruction pointer.
189
190 **/
191 VOID
192 DumpModuleInfoByIp (
193 IN UINTN CallerIpAddress
194 )
195 {
196 UINTN Pe32Data;
197 VOID *PdbPointer;
198
199 //
200 // Find Image Base
201 //
202 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
203 if (Pe32Data != 0) {
204 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
205 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
206 if (PdbPointer != NULL) {
207 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
208 }
209 }
210 }
211
212 /**
213 Read information from the CPU save state.
214
215 @param This EFI_SMM_CPU_PROTOCOL instance
216 @param Width The number of bytes to read from the CPU save state.
217 @param Register Specifies the CPU register to read form the save state.
218 @param CpuIndex Specifies the zero-based index of the CPU save state.
219 @param Buffer Upon return, this holds the CPU register value read from the save state.
220
221 @retval EFI_SUCCESS The register was read from Save State
222 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
223 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
224
225 **/
226 EFI_STATUS
227 EFIAPI
228 SmmReadSaveState (
229 IN CONST EFI_SMM_CPU_PROTOCOL *This,
230 IN UINTN Width,
231 IN EFI_SMM_SAVE_STATE_REGISTER Register,
232 IN UINTN CpuIndex,
233 OUT VOID *Buffer
234 )
235 {
236 EFI_STATUS Status;
237
238 //
239 // Retrieve pointer to the specified CPU's SMM Save State buffer
240 //
241 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
242 return EFI_INVALID_PARAMETER;
243 }
244
245 //
246 // The SpeculationBarrier() call here is to ensure the above check for the
247 // CpuIndex has been completed before the execution of subsequent codes.
248 //
249 SpeculationBarrier ();
250
251 //
252 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
253 //
254 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
255 //
256 // The pseudo-register only supports the 64-bit size specified by Width.
257 //
258 if (Width != sizeof (UINT64)) {
259 return EFI_INVALID_PARAMETER;
260 }
261
262 //
263 // If the processor is in SMM at the time the SMI occurred,
264 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
265 // Otherwise, EFI_NOT_FOUND is returned.
266 //
267 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
268 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
269 return EFI_SUCCESS;
270 } else {
271 return EFI_NOT_FOUND;
272 }
273 }
274
275 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
276 return EFI_INVALID_PARAMETER;
277 }
278
279 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
280 if (Status == EFI_UNSUPPORTED) {
281 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
282 }
283
284 return Status;
285 }
286
287 /**
288 Write data to the CPU save state.
289
290 @param This EFI_SMM_CPU_PROTOCOL instance
291 @param Width The number of bytes to read from the CPU save state.
292 @param Register Specifies the CPU register to write to the save state.
293 @param CpuIndex Specifies the zero-based index of the CPU save state
294 @param Buffer Upon entry, this holds the new CPU register value.
295
296 @retval EFI_SUCCESS The register was written from Save State
297 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
298 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
299
300 **/
301 EFI_STATUS
302 EFIAPI
303 SmmWriteSaveState (
304 IN CONST EFI_SMM_CPU_PROTOCOL *This,
305 IN UINTN Width,
306 IN EFI_SMM_SAVE_STATE_REGISTER Register,
307 IN UINTN CpuIndex,
308 IN CONST VOID *Buffer
309 )
310 {
311 EFI_STATUS Status;
312
313 //
314 // Retrieve pointer to the specified CPU's SMM Save State buffer
315 //
316 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
317 return EFI_INVALID_PARAMETER;
318 }
319
320 //
321 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
322 //
323 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
324 return EFI_SUCCESS;
325 }
326
327 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
328 return EFI_INVALID_PARAMETER;
329 }
330
331 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
332 if (Status == EFI_UNSUPPORTED) {
333 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
334 }
335
336 return Status;
337 }
338
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Executed by each CPU during its first SMI: loads the SMM IDT, identifies
  the current CPU by local APIC ID, runs SMM feature initialization for it,
  and - when legacy SMBASE relocation is in use - hooks the RSM return so
  SmmRelocateBases can observe completion via mRebased.
**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32   ApicId;
  UINTN    Index;
  BOOLEAN  IsBsp;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  // mBspApicId was captured by the BSP before the init SMIs were issued.
  IsBsp = (BOOLEAN)(mBspApicId == ApicId);

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  // Find this CPU's index by matching its APIC ID against ProcessorInfo.
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        IsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      } else if (IsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      if (!mSmmRelocated) {
        //
        // Hook return after RSM to set SMM re-based flag
        //
        SemaphoreHook (Index, &mRebased[Index]);
      }

      return;
    }
  }

  // The executing CPU's APIC ID must appear in the known processor list.
  ASSERT (FALSE);
}
401
402 /**
403 Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
404
405 **/
406 VOID
407 ExecuteFirstSmiInit (
408 VOID
409 )
410 {
411 UINTN Index;
412
413 if (mSmmInitialized == NULL) {
414 mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
415 }
416
417 ASSERT (mSmmInitialized != NULL);
418 if (mSmmInitialized == NULL) {
419 return;
420 }
421
422 //
423 // Reset the mSmmInitialized to false.
424 //
425 ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);
426
427 //
428 // Get the BSP ApicId.
429 //
430 mBspApicId = GetApicId ();
431
432 //
433 // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
434 //
435 SendSmiIpi (mBspApicId);
436 SendSmiIpiAllExcludingSelf ();
437
438 //
439 // Wait for all processors to finish its 1st SMI
440 //
441 for (Index = 0; Index < mNumberOfCpus; Index++) {
442 while (!(BOOLEAN)mSmmInitialized[Index]) {
443 }
444 }
445 }
446
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Copies the SmmInit stub over the default SMI handler at SMM_DEFAULT_SMBASE,
  sends an SMI to each AP (and finally the BSP) so every CPU runs the stub
  and rebases its SMBASE, then restores the original memory contents.
**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  // (CR0/CR4 are also cached for S3 resume state initialization).
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  // The CET enable bit is cleared in the CR4 value used by the stub.
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  // Default SMI handler entry and save state map at the default SMBASE.
  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  mBspApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  SendSmiIpi (mBspApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
544
545 /**
546 SMM Ready To Lock event notification handler.
547
548 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
549 perform additional lock actions that must be performed from SMM on the next SMI.
550
551 @param[in] Protocol Points to the protocol's unique identifier.
552 @param[in] Interface Points to the interface instance.
553 @param[in] Handle The handle on which the interface was installed.
554
555 @retval EFI_SUCCESS Notification handler runs successfully.
556 **/
557 EFI_STATUS
558 EFIAPI
559 SmmReadyToLockEventNotify (
560 IN CONST EFI_GUID *Protocol,
561 IN VOID *Interface,
562 IN EFI_HANDLE Handle
563 )
564 {
565 GetAcpiCpuData ();
566
567 //
568 // Cache a copy of UEFI memory map before we start profiling feature.
569 //
570 GetUefiMemoryMap ();
571
572 //
573 // Set SMM ready to lock flag and return
574 //
575 mSmmReadyToLock = TRUE;
576 return EFI_SUCCESS;
577 }
578
579 /**
580 The module Entry Point of the CPU SMM driver.
581
582 @param ImageHandle The firmware allocated handle for the EFI image.
583 @param SystemTable A pointer to the EFI System Table.
584
585 @retval EFI_SUCCESS The entry point is executed successfully.
586 @retval Other Some error occurs when executing this entry point.
587
588 **/
589 EFI_STATUS
590 EFIAPI
591 PiCpuSmmEntry (
592 IN EFI_HANDLE ImageHandle,
593 IN EFI_SYSTEM_TABLE *SystemTable
594 )
595 {
596 EFI_STATUS Status;
597 EFI_MP_SERVICES_PROTOCOL *MpServices;
598 UINTN NumberOfEnabledProcessors;
599 UINTN Index;
600 VOID *Buffer;
601 UINTN BufferPages;
602 UINTN TileCodeSize;
603 UINTN TileDataSize;
604 UINTN TileSize;
605 UINT8 *Stacks;
606 VOID *Registration;
607 UINT32 RegEax;
608 UINT32 RegEbx;
609 UINT32 RegEcx;
610 UINT32 RegEdx;
611 UINTN FamilyId;
612 UINTN ModelId;
613 UINT32 Cr3;
614 EFI_HOB_GUID_TYPE *GuidHob;
615 SMM_BASE_HOB_DATA *SmmBaseHobData;
616
617 GuidHob = NULL;
618 SmmBaseHobData = NULL;
619
620 //
621 // Initialize address fixup
622 //
623 PiSmmCpuSmmInitFixupAddress ();
624 PiSmmCpuSmiEntryFixupAddress ();
625
626 //
627 // Initialize Debug Agent to support source level debug in SMM code
628 //
629 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
630
631 //
632 // Report the start of CPU SMM initialization.
633 //
634 REPORT_STATUS_CODE (
635 EFI_PROGRESS_CODE,
636 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
637 );
638
639 //
640 // Find out SMRR Base and SMRR Size
641 //
642 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
643
644 //
645 // Get MP Services Protocol
646 //
647 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
648 ASSERT_EFI_ERROR (Status);
649
650 //
651 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
652 //
653 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
654 ASSERT_EFI_ERROR (Status);
655 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
656
657 //
658 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
659 // A constant BSP index makes no sense because it may be hot removed.
660 //
661 DEBUG_CODE_BEGIN ();
662 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
663 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
664 }
665
666 DEBUG_CODE_END ();
667
668 //
669 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
670 //
671 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
672 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
673
674 //
675 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
676 // Make sure AddressEncMask is contained to smallest supported address field.
677 //
678 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
679 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
680
681 //
682 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
683 //
684 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
685 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
686 } else {
687 mMaxNumberOfCpus = mNumberOfCpus;
688 }
689
690 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
691
692 //
693 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
694 // allocated buffer. The minimum size of this buffer for a uniprocessor system
695 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
696 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
697 // then the SMI entry point and the CPU save state areas can be tiles to minimize
698 // the total amount SMRAM required for all the CPUs. The tile size can be computed
699 // by adding the // CPU save state size, any extra CPU specific context, and
700 // the size of code that must be placed at the SMI entry point to transfer
701 // control to a C function in the native SMM execution mode. This size is
702 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
703 // The total amount of memory required is the maximum number of CPUs that
704 // platform supports times the tile size. The picture below shows the tiling,
705 // where m is the number of tiles that fit in 32KB.
706 //
707 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
708 // | CPU m+1 Save State |
709 // +-----------------------------+
710 // | CPU m+1 Extra Data |
711 // +-----------------------------+
712 // | Padding |
713 // +-----------------------------+
714 // | CPU 2m SMI Entry |
715 // +#############################+ <-- Base of allocated buffer + 64 KB
716 // | CPU m-1 Save State |
717 // +-----------------------------+
718 // | CPU m-1 Extra Data |
719 // +-----------------------------+
720 // | Padding |
721 // +-----------------------------+
722 // | CPU 2m-1 SMI Entry |
723 // +=============================+ <-- 2^n offset from Base of allocated buffer
724 // | . . . . . . . . . . . . |
725 // +=============================+ <-- 2^n offset from Base of allocated buffer
726 // | CPU 2 Save State |
727 // +-----------------------------+
728 // | CPU 2 Extra Data |
729 // +-----------------------------+
730 // | Padding |
731 // +-----------------------------+
732 // | CPU m+1 SMI Entry |
733 // +=============================+ <-- Base of allocated buffer + 32 KB
734 // | CPU 1 Save State |
735 // +-----------------------------+
736 // | CPU 1 Extra Data |
737 // +-----------------------------+
738 // | Padding |
739 // +-----------------------------+
740 // | CPU m SMI Entry |
741 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
742 // | CPU 0 Save State |
743 // +-----------------------------+
744 // | CPU 0 Extra Data |
745 // +-----------------------------+
746 // | Padding |
747 // +-----------------------------+
748 // | CPU m-1 SMI Entry |
749 // +=============================+ <-- 2^n offset from Base of allocated buffer
750 // | . . . . . . . . . . . . |
751 // +=============================+ <-- 2^n offset from Base of allocated buffer
752 // | Padding |
753 // +-----------------------------+
754 // | CPU 1 SMI Entry |
755 // +=============================+ <-- 2^n offset from Base of allocated buffer
756 // | Padding |
757 // +-----------------------------+
758 // | CPU 0 SMI Entry |
759 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
760 //
761
762 //
763 // Retrieve CPU Family
764 //
765 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
766 FamilyId = (RegEax >> 8) & 0xf;
767 ModelId = (RegEax >> 4) & 0xf;
768 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
769 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
770 }
771
772 RegEdx = 0;
773 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
774 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
775 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
776 }
777
778 //
779 // Determine the mode of the CPU at the time an SMI occurs
780 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
781 // Volume 3C, Section 34.4.1.1
782 //
783 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
784 if ((RegEdx & BIT29) != 0) {
785 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
786 }
787
788 if (FamilyId == 0x06) {
789 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
790 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
791 }
792 }
793
794 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
795 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
796 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
797 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
798 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
799 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
800 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
801 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
802 if ((RegEcx & CPUID_CET_SS) == 0) {
803 mCetSupported = FALSE;
804 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
805 }
806
807 if (mCetSupported) {
808 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
809 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
810 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
811 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
812 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
813 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
814 }
815 } else {
816 mCetSupported = FALSE;
817 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
818 }
819 } else {
820 mCetSupported = FALSE;
821 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
822 }
823
824 //
825 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
826 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
827 // This size is rounded up to nearest power of 2.
828 //
829 TileCodeSize = GetSmiHandlerSize ();
830 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
831 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
832 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
833 TileSize = TileDataSize + TileCodeSize - 1;
834 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
835 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
836
837 //
838 // If the TileSize is larger than space available for the SMI Handler of
839 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
840 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
841 // the SMI Handler size must be reduced or the size of the extra CPU specific
842 // context must be reduced.
843 //
844 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
845
846 //
847 // Retrive the allocated SmmBase from gSmmBaseHobGuid. If found,
848 // means the SmBase relocation has been done.
849 //
850 GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
851 if (GuidHob != NULL) {
852 //
853 // Check whether the Required TileSize is enough.
854 //
855 if (TileSize > SIZE_8KB) {
856 DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
857 CpuDeadLoop ();
858 return RETURN_BUFFER_TOO_SMALL;
859 }
860
861 SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);
862
863 //
864 // Assume single instance of HOB produced, expect the HOB.NumberOfProcessors equals to the mMaxNumberOfCpus.
865 //
866 ASSERT (SmmBaseHobData->NumberOfProcessors == (UINT32)mMaxNumberOfCpus && SmmBaseHobData->ProcessorIndex == 0);
867 mSmmRelocated = TRUE;
868 } else {
869 //
870 // When the HOB doesn't exist, allocate new SMBASE itself.
871 //
872 DEBUG ((DEBUG_INFO, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));
873 //
874 // Allocate buffer for all of the tiles.
875 //
876 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
877 // Volume 3C, Section 34.11 SMBASE Relocation
878 // For Pentium and Intel486 processors, the SMBASE values must be
879 // aligned on a 32-KByte boundary or the processor will enter shutdown
880 // state during the execution of a RSM instruction.
881 //
882 // Intel486 processors: FamilyId is 4
883 // Pentium processors : FamilyId is 5
884 //
885 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
886 if ((FamilyId == 4) || (FamilyId == 5)) {
887 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
888 } else {
889 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
890 }
891
892 ASSERT (Buffer != NULL);
893 DEBUG ((DEBUG_INFO, "New Allcoated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));
894 }
895
896 //
897 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
898 //
899 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
900 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
901
902 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
903 ASSERT (gSmmCpuPrivate->Operation != NULL);
904
905 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
906 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
907
908 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
909 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
910
911 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
912 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
913
914 //
915 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
916 //
917 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
918 ASSERT (mCpuHotPlugData.ApicId != NULL);
919 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
920 ASSERT (mCpuHotPlugData.SmBase != NULL);
921 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
922
923 //
924 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
925 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
926 // size for each CPU in the platform
927 //
928 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
929 mCpuHotPlugData.SmBase[Index] = mSmmRelocated ? (UINTN)SmmBaseHobData->SmBase[Index] : (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
930
931 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
932 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
933 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
934
935 if (Index < mNumberOfCpus) {
936 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
937 ASSERT_EFI_ERROR (Status);
938 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
939
940 DEBUG ((
941 DEBUG_INFO,
942 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
943 Index,
944 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
945 mCpuHotPlugData.SmBase[Index],
946 gSmmCpuPrivate->CpuSaveState[Index],
947 gSmmCpuPrivate->CpuSaveStateSize[Index]
948 ));
949 } else {
950 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
951 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
952 }
953 }
954
955 //
956 // Allocate SMI stacks for all processors.
957 //
958 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
959 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
960 //
961 // SMM Stack Guard Enabled
962 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.
963 //
964 // +--------------------------------------------------+-----+--------------------------------------------------+
965 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
966 // +--------------------------------------------------+-----+--------------------------------------------------+
967 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
968 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
969 // | | | |
970 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
971 //
972 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
973 }
974
975 mSmmShadowStackSize = 0;
976 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
977 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
978
979 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
980 //
981 // SMM Stack Guard Enabled
982 // Append Shadow Stack after normal stack
983 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
984 //
985 // |= Stacks
986 // +--------------------------------------------------+---------------------------------------------------------------+
987 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
988 // +--------------------------------------------------+---------------------------------------------------------------+
989 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
990 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
991 // | |
992 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
993 //
994 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
995 } else {
996 //
997 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
998 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
999 // 1 more pages is allocated for each processor, it is known good stack.
1000 //
1001 //
1002 // |= Stacks
1003 // +-------------------------------------+--------------------------------------------------+
1004 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
1005 // +-------------------------------------+--------------------------------------------------+
1006 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
1007 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
1008 // | |
1009 // |<-------------------------------- Processor N ----------------------------------------->|
1010 //
1011 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
1012 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
1013 }
1014 }
1015
1016 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
1017 ASSERT (Stacks != NULL);
1018 mSmmStackArrayBase = (UINTN)Stacks;
1019 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
1020
1021 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
1022 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
1023 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
1024 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1025 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
1026 }
1027
1028 //
1029 // Set SMI stack for SMM base relocation
1030 //
1031 PatchInstructionX86 (
1032 gPatchSmmInitStack,
1033 (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
1034 sizeof (UINTN)
1035 );
1036
1037 //
1038 // Initialize IDT
1039 //
1040 InitializeSmmIdt ();
1041
1042 //
1043 // Check whether Smm Relocation is done or not.
1044 // If not, will do the SmmBases Relocation here!!!
1045 //
1046 if (!mSmmRelocated) {
1047 //
1048 // Relocate SMM Base addresses to the ones allocated from SMRAM
1049 //
1050 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
1051 ASSERT (mRebased != NULL);
1052 SmmRelocateBases ();
1053
1054 //
1055 // Call hook for BSP to perform extra actions in normal mode after all
1056 // SMM base addresses have been relocated on all CPUs
1057 //
1058 SmmCpuFeaturesSmmRelocationComplete ();
1059 }
1060
1061 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
1062
1063 //
1064 // SMM Time initialization
1065 //
1066 InitializeSmmTimer ();
1067
1068 //
1069 // Initialize MP globals
1070 //
1071 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
1072
1073 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1074 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
1075 SetShadowStack (
1076 Cr3,
1077 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
1078 mSmmShadowStackSize
1079 );
1080 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1081 SetNotPresentPage (
1082 Cr3,
1083 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
1084 EFI_PAGES_TO_SIZE (1)
1085 );
1086 }
1087 }
1088 }
1089
1090 //
1091 // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
1092 // Those MSRs & CSRs must be configured before normal SMI sources happen.
1093 // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
1094 //
1095 if (mSmmRelocated) {
1096 ExecuteFirstSmiInit ();
1097
1098 //
1099 // Call hook for BSP to perform extra actions in normal mode after all
1100 // SMM base addresses have been relocated on all CPUs
1101 //
1102 SmmCpuFeaturesSmmRelocationComplete ();
1103 }
1104
1105 //
1106 // Fill in SMM Reserved Regions
1107 //
1108 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1109 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1110
1111 //
1112 // Install the SMM Configuration Protocol onto a new handle on the handle database.
1113 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1114 // to an SMRAM address will be present in the handle database
1115 //
1116 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1117 &gSmmCpuPrivate->SmmCpuHandle,
1118 &gEfiSmmConfigurationProtocolGuid,
1119 &gSmmCpuPrivate->SmmConfiguration,
1120 NULL
1121 );
1122 ASSERT_EFI_ERROR (Status);
1123
1124 //
1125 // Install the SMM CPU Protocol into SMM protocol database
1126 //
1127 Status = gSmst->SmmInstallProtocolInterface (
1128 &mSmmCpuHandle,
1129 &gEfiSmmCpuProtocolGuid,
1130 EFI_NATIVE_INTERFACE,
1131 &mSmmCpu
1132 );
1133 ASSERT_EFI_ERROR (Status);
1134
1135 //
1136 // Install the SMM Memory Attribute Protocol into SMM protocol database
1137 //
1138 Status = gSmst->SmmInstallProtocolInterface (
1139 &mSmmCpuHandle,
1140 &gEdkiiSmmMemoryAttributeProtocolGuid,
1141 EFI_NATIVE_INTERFACE,
1142 &mSmmMemoryAttribute
1143 );
1144 ASSERT_EFI_ERROR (Status);
1145
1146 //
1147 // Initialize global buffer for MM MP.
1148 //
1149 InitializeDataForMmMp ();
1150
1151 //
1152 // Initialize Package First Thread Index Info.
1153 //
1154 InitPackageFirstThreadIndexInfo ();
1155
1156 //
1157 // Install the SMM Mp Protocol into SMM protocol database
1158 //
1159 Status = gSmst->SmmInstallProtocolInterface (
1160 &mSmmCpuHandle,
1161 &gEfiMmMpProtocolGuid,
1162 EFI_NATIVE_INTERFACE,
1163 &mSmmMp
1164 );
1165 ASSERT_EFI_ERROR (Status);
1166
1167 //
1168 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1169 //
1170 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1171 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1172 ASSERT_EFI_ERROR (Status);
1173 }
1174
1175 //
1176 // Initialize SMM CPU Services Support
1177 //
1178 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1179 ASSERT_EFI_ERROR (Status);
1180
1181 //
1182 // register SMM Ready To Lock Protocol notification
1183 //
1184 Status = gSmst->SmmRegisterProtocolNotify (
1185 &gEfiSmmReadyToLockProtocolGuid,
1186 SmmReadyToLockEventNotify,
1187 &Registration
1188 );
1189 ASSERT_EFI_ERROR (Status);
1190
1191 //
1192 // Initialize SMM Profile feature
1193 //
1194 InitSmmProfile (Cr3);
1195
1196 GetAcpiS3EnableFlag ();
1197 InitSmmS3ResumeState (Cr3);
1198
1199 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1200
1201 return EFI_SUCCESS;
1202 }
1203
1204 /**
1205
1206 Find out SMRAM information including SMRR base and SMRR size.
1207
1208 @param SmrrBase SMRR base
1209 @param SmrrSize SMRR size
1210
1211 **/
1212 VOID
1213 FindSmramInfo (
1214 OUT UINT32 *SmrrBase,
1215 OUT UINT32 *SmrrSize
1216 )
1217 {
1218 EFI_STATUS Status;
1219 UINTN Size;
1220 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1221 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1222 UINTN Index;
1223 UINT64 MaxSize;
1224 BOOLEAN Found;
1225
1226 //
1227 // Get SMM Access Protocol
1228 //
1229 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1230 ASSERT_EFI_ERROR (Status);
1231
1232 //
1233 // Get SMRAM information
1234 //
1235 Size = 0;
1236 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1237 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1238
1239 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1240 ASSERT (mSmmCpuSmramRanges != NULL);
1241
1242 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1243 ASSERT_EFI_ERROR (Status);
1244
1245 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1246
1247 //
1248 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1249 //
1250 CurrentSmramRange = NULL;
1251 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1252 //
1253 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1254 //
1255 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1256 continue;
1257 }
1258
1259 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1260 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1261 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1262 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1263 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1264 }
1265 }
1266 }
1267 }
1268
1269 ASSERT (CurrentSmramRange != NULL);
1270
1271 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1272 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1273
1274 do {
1275 Found = FALSE;
1276 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1277 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
1278 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
1279 {
1280 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1281 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1282 Found = TRUE;
1283 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
1284 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1285 Found = TRUE;
1286 }
1287 }
1288 } while (Found);
1289
1290 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1291 }
1292
1293 /**
1294 Configure SMM Code Access Check feature on an AP.
1295 SMM Feature Control MSR will be locked after configuration.
1296
1297 @param[in,out] Buffer Pointer to private data buffer.
1298 **/
1299 VOID
1300 EFIAPI
1301 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1302 IN OUT VOID *Buffer
1303 )
1304 {
1305 UINTN CpuIndex;
1306 UINT64 SmmFeatureControlMsr;
1307 UINT64 NewSmmFeatureControlMsr;
1308
1309 //
1310 // Retrieve the CPU Index from the context passed in
1311 //
1312 CpuIndex = *(UINTN *)Buffer;
1313
1314 //
1315 // Get the current SMM Feature Control MSR value
1316 //
1317 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1318
1319 //
1320 // Compute the new SMM Feature Control MSR value
1321 //
1322 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1323 if (mSmmCodeAccessCheckEnable) {
1324 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1325 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1326 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1327 }
1328 }
1329
1330 //
1331 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1332 //
1333 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1334 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1335 }
1336
1337 //
1338 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1339 //
1340 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1341 }
1342
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  The BSP configures itself directly, then dispatches each existing AP in
  turn, using mConfigSmmCodeAccessCheckLock as a handshake: the BSP acquires
  the lock before starting an AP and waits until the AP releases it in
  ConfigSmmCodeAccessCheckOnCurrentProcessor() before moving on.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Passing &Index (which this loop keeps mutating) is safe because the
      // wait below does not let the loop advance until the AP has read its
      // context and released the lock.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1427
1428 /**
1429 Allocate pages for code.
1430
1431 @param[in] Pages Number of pages to be allocated.
1432
1433 @return Allocated memory.
1434 **/
1435 VOID *
1436 AllocateCodePages (
1437 IN UINTN Pages
1438 )
1439 {
1440 EFI_STATUS Status;
1441 EFI_PHYSICAL_ADDRESS Memory;
1442
1443 if (Pages == 0) {
1444 return NULL;
1445 }
1446
1447 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1448 if (EFI_ERROR (Status)) {
1449 return NULL;
1450 }
1451
1452 return (VOID *)(UINTN)Memory;
1453 }
1454
/**
  Allocate aligned pages for code.

  Over-allocates by EFI_SIZE_TO_PAGES (Alignment) pages when Alignment exceeds
  the page size, then frees the unaligned head and tail so only the aligned
  region remains allocated.

  @param[in] Pages Number of pages to be allocated.
  @param[in] Alignment The requested alignment of the allocation.
  Must be a power of two.
  If Alignment is zero, then byte alignment is used.

  @return Allocated memory, or NULL if Pages is zero or allocation fails.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    //
    // Round up to the first aligned address inside the over-sized allocation.
    //
    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }

    //
    // Memory now points just past the aligned region; whatever remains of the
    // over-allocation is the tail to give back.
    //
    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    //
    // Page allocation already satisfies any alignment <= EFI_PAGE_SIZE.
    //
    AlignedMemory = (UINTN)Memory;
  }

  return (VOID *)AlignedMemory;
}
1536
1537 /**
1538 Perform the remaining tasks.
1539
1540 **/
1541 VOID
1542 PerformRemainingTasks (
1543 VOID
1544 )
1545 {
1546 if (mSmmReadyToLock) {
1547 //
1548 // Start SMM Profile feature
1549 //
1550 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1551 SmmProfileStart ();
1552 }
1553
1554 //
1555 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1556 //
1557 InitPaging ();
1558
1559 //
1560 // Mark critical region to be read-only in page table
1561 //
1562 SetMemMapAttributes ();
1563
1564 if (IsRestrictedMemoryAccess ()) {
1565 //
1566 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1567 //
1568 SetUefiMemMapAttributes ();
1569
1570 //
1571 // Set page table itself to be read-only
1572 //
1573 SetPageTableAttributes ();
1574 }
1575
1576 //
1577 // Configure SMM Code Access Check feature if available.
1578 //
1579 ConfigSmmCodeAccessCheck ();
1580
1581 SmmCpuFeaturesCompleteSmmReadyToLock ();
1582
1583 //
1584 // Clean SMM ready to lock flag
1585 //
1586 mSmmReadyToLock = FALSE;
1587 }
1588 }
1589
/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  //
  // Restore the SMM configuration needed on the S3 resume path.
  // NOTE(review): presumably a no-op on the normal boot path — confirm in the
  // RestoreSmmConfigurationInS3 () implementation.
  //
  RestoreSmmConfigurationInS3 ();
}