UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // SMM CPU Private Data structure that contains SMM Configuration Protocol
15 // along with its supporting fields.
16 //
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
19 NULL, // SmmCpuHandle
20 NULL, // Pointer to ProcessorInfo array
21 NULL, // Pointer to Operation array
22 NULL, // Pointer to CpuSaveStateSize array
23 NULL, // Pointer to CpuSaveState array
24 {
25 { 0 }
26 }, // SmmReservedSmramRegion
27 {
28 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
29 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
30 0, // SmmCoreEntryContext.NumberOfCpus
31 NULL, // SmmCoreEntryContext.CpuSaveStateSize
32 NULL // SmmCoreEntryContext.CpuSaveState
33 },
34 NULL, // SmmCoreEntry
35 {
36 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
37 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
38 },
39 NULL, // pointer to Ap Wrapper Func array
40 { NULL, NULL }, // List_Entry for Tokens.
41 };
42
43 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
45 0, // Array Length of SmBase and APIC ID
46 NULL, // Pointer to APIC ID array
47 NULL, // Pointer to SMBASE array
48 0, // Reserved
49 0, // SmrrBase
50 0 // SmrrSize
51 };
52
53 //
54 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
55 //
56 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
57
58 //
59 // SMM Relocation variables
60 //
61 volatile BOOLEAN *mRebased;
62 volatile BOOLEAN mIsBsp;
63
64 ///
65 /// Handle for the SMM CPU Protocol
66 ///
67 EFI_HANDLE mSmmCpuHandle = NULL;
68
69 ///
70 /// SMM CPU Protocol instance
71 ///
72 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
73 SmmReadSaveState,
74 SmmWriteSaveState
75 };
76
77 ///
78 /// SMM Memory Attribute Protocol instance
79 ///
80 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
81 EdkiiSmmGetMemoryAttributes,
82 EdkiiSmmSetMemoryAttributes,
83 EdkiiSmmClearMemoryAttributes
84 };
85
86 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
87
88 //
89 // SMM stack information
90 //
91 UINTN mSmmStackArrayBase;
92 UINTN mSmmStackArrayEnd;
93 UINTN mSmmStackSize;
94
95 UINTN mSmmShadowStackSize;
96 BOOLEAN mCetSupported = TRUE;
97
98 UINTN mMaxNumberOfCpus = 1;
99 UINTN mNumberOfCpus = 1;
100
101 //
102 // SMM ready to lock flag
103 //
104 BOOLEAN mSmmReadyToLock = FALSE;
105
106 //
107 // Global used to cache PCD for SMM Code Access Check enable
108 //
109 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
110
111 //
112 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
113 //
114 UINT64 mAddressEncMask = 0;
115
116 //
117 // Spin lock used to serialize setting of SMM Code Access Check feature
118 //
119 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
120
121 //
122 // Saved SMM ranges information
123 //
124 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
125 UINTN mSmmCpuSmramRangeCount;
126
127 UINT8 mPhysicalAddressBits;
128
129 //
130 // Control register contents saved for SMM S3 resume state initialization.
131 //
132 UINT32 mSmmCr0;
133 UINT32 mSmmCr4;
134
135 /**
136 Initialize the IDT to set up exception handlers for SMM.
137
138 **/
139 VOID
140 InitializeSmmIdt (
141 VOID
142 )
143 {
144 EFI_STATUS Status;
145 BOOLEAN InterruptState;
146 IA32_DESCRIPTOR DxeIdtr;
147
148 //
149 // There are only 32 entries (rather than the full 256) since only processor-
150 // generated exceptions will be handled.
151 //
152 gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
153 //
154 // Allocate page aligned IDT, because it might be set as read only.
155 //
156 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
157 ASSERT (gcSmiIdtr.Base != 0);
158 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
159
160 //
161 // Disable Interrupt and save DXE IDT table
162 //
163 InterruptState = SaveAndDisableInterrupts ();
164 AsmReadIdtr (&DxeIdtr);
165 //
166 // Load SMM temporary IDT table
167 //
168 AsmWriteIdtr (&gcSmiIdtr);
169 //
170 // Set up the default SMM exception handlers; the SMM IDT table
171 // will be updated and saved in gcSmiIdtr
172 //
173 Status = InitializeCpuExceptionHandlers (NULL);
174 ASSERT_EFI_ERROR (Status);
175 //
176 // Restore DXE IDT table and CPU interrupt
177 //
178 AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
179 SetInterruptState (InterruptState);
180 }
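//
// For illustration: a worked sizing example for the IDT allocation above, assuming
// an X64 build where sizeof (IA32_IDT_GATE_DESCRIPTOR) is 16 bytes.
//
//   gcSmiIdtr.Limit = (16 * 32) - 1 = 0x1FF
//   EFI_SIZE_TO_PAGES (0x1FF + 1)   = 1 page
//
// A single page-aligned code page therefore holds the entire 32-entry SMM IDT,
// which is what allows it to be marked read-only later. On IA32 the descriptor
// is 8 bytes, so the table is 256 bytes and still fits in one page.
//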
181
182 /**
183 Look up the module that contains the input IP address and print its name.
184
185 @param CallerIpAddress Caller instruction pointer.
186
187 **/
188 VOID
189 DumpModuleInfoByIp (
190 IN UINTN CallerIpAddress
191 )
192 {
193 UINTN Pe32Data;
194 VOID *PdbPointer;
195
196 //
197 // Find Image Base
198 //
199 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
200 if (Pe32Data != 0) {
201 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
202 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
203 if (PdbPointer != NULL) {
204 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
205 }
206 }
207 }
208
209 /**
210 Read information from the CPU save state.
211
212 @param This EFI_SMM_CPU_PROTOCOL instance
213 @param Width The number of bytes to read from the CPU save state.
214 @param Register Specifies the CPU register to read from the save state.
215 @param CpuIndex Specifies the zero-based index of the CPU save state.
216 @param Buffer Upon return, this holds the CPU register value read from the save state.
217
218 @retval EFI_SUCCESS The register was read from Save State
219 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
220 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
221
222 **/
223 EFI_STATUS
224 EFIAPI
225 SmmReadSaveState (
226 IN CONST EFI_SMM_CPU_PROTOCOL *This,
227 IN UINTN Width,
228 IN EFI_SMM_SAVE_STATE_REGISTER Register,
229 IN UINTN CpuIndex,
230 OUT VOID *Buffer
231 )
232 {
233 EFI_STATUS Status;
234
235 //
236 // Retrieve pointer to the specified CPU's SMM Save State buffer
237 //
238 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
239 return EFI_INVALID_PARAMETER;
240 }
241
242 //
243 // The SpeculationBarrier() call here is to ensure the above check for the
244 // CpuIndex has been completed before the execution of subsequent codes.
245 //
246 SpeculationBarrier ();
247
248 //
249 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
250 //
251 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
252 //
253 // The pseudo-register only supports the 64-bit size specified by Width.
254 //
255 if (Width != sizeof (UINT64)) {
256 return EFI_INVALID_PARAMETER;
257 }
258
259 //
260 // If the processor is in SMM at the time the SMI occurred,
261 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
262 // Otherwise, EFI_NOT_FOUND is returned.
263 //
264 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
265 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
266 return EFI_SUCCESS;
267 } else {
268 return EFI_NOT_FOUND;
269 }
270 }
271
272 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
273 return EFI_INVALID_PARAMETER;
274 }
275
276 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
277 if (Status == EFI_UNSUPPORTED) {
278 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
279 }
280
281 return Status;
282 }
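//
// For illustration only: a minimal, hypothetical consumer of SmmReadSaveState()
// through EFI_SMM_CPU_PROTOCOL. This sketch is not called by this driver; the
// function name and the use of gSmst->SmmLocateProtocol() are assumptions about
// how an SMI handler elsewhere might read a CPU's save state.
//
EFI_STATUS
ExampleReadProcessorId (
  IN  UINTN   CpuIndex,
  OUT UINT64  *ProcessorId
  )
{
  EFI_STATUS            Status;
  EFI_SMM_CPU_PROTOCOL  *SmmCpu;

  //
  // Locate the SMM CPU protocol installed by PiCpuSmmEntry() below.
  //
  Status = gSmst->SmmLocateProtocol (&gEfiSmmCpuProtocolGuid, NULL, (VOID **)&SmmCpu);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // The PROCESSOR_ID pseudo-register requires a Width of exactly sizeof (UINT64);
  // other registers accept the widths defined by the PI specification.
  //
  return SmmCpu->ReadSaveState (
                   SmmCpu,
                   sizeof (UINT64),
                   EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID,
                   CpuIndex,
                   ProcessorId
                   );
}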
283
284 /**
285 Write data to the CPU save state.
286
287 @param This EFI_SMM_CPU_PROTOCOL instance
288 @param Width The number of bytes to write to the CPU save state.
289 @param Register Specifies the CPU register to write to the save state.
290 @param CpuIndex Specifies the zero-based index of the CPU save state
291 @param Buffer Upon entry, this holds the new CPU register value.
292
293 @retval EFI_SUCCESS The register was written to Save State
294 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
295 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
296
297 **/
298 EFI_STATUS
299 EFIAPI
300 SmmWriteSaveState (
301 IN CONST EFI_SMM_CPU_PROTOCOL *This,
302 IN UINTN Width,
303 IN EFI_SMM_SAVE_STATE_REGISTER Register,
304 IN UINTN CpuIndex,
305 IN CONST VOID *Buffer
306 )
307 {
308 EFI_STATUS Status;
309
310 //
311 // Retrieve pointer to the specified CPU's SMM Save State buffer
312 //
313 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
314 return EFI_INVALID_PARAMETER;
315 }
316
317 //
318 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
319 //
320 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
321 return EFI_SUCCESS;
322 }
323
324 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
325 return EFI_INVALID_PARAMETER;
326 }
327
328 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
329 if (Status == EFI_UNSUPPORTED) {
330 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
331 }
332
333 return Status;
334 }
335
336 /**
337 C function for the SMI handler, used to change each processor's SMBASE register.
338
339 **/
340 VOID
341 EFIAPI
342 SmmInitHandler (
343 VOID
344 )
345 {
346 UINT32 ApicId;
347 UINTN Index;
348
349 //
350 // Update SMM IDT entries' code segment and load IDT
351 //
352 AsmWriteIdtr (&gcSmiIdtr);
353 ApicId = GetApicId ();
354
355 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
356
357 for (Index = 0; Index < mNumberOfCpus; Index++) {
358 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
359 //
360 // Initialize SMM specific features on the currently executing CPU
361 //
362 SmmCpuFeaturesInitializeProcessor (
363 Index,
364 mIsBsp,
365 gSmmCpuPrivate->ProcessorInfo,
366 &mCpuHotPlugData
367 );
368
369 if (!mSmmS3Flag) {
370 //
371 // Check XD and BTS features on each processor on normal boot
372 //
373 CheckFeatureSupported ();
374 }
375
376 if (mIsBsp) {
377 //
378 // BSP rebase is already done above.
379 // Initialize private data during S3 resume
380 //
381 InitializeMpSyncData ();
382 }
383
384 //
385 // Hook return after RSM to set SMM re-based flag
386 //
387 SemaphoreHook (Index, &mRebased[Index]);
388
389 return;
390 }
391 }
392
393 ASSERT (FALSE);
394 }
395
396 /**
397 Relocate SmmBases for each processor.
398
399 Executed on first boot and on all S3 resumes
400
401 **/
402 VOID
403 EFIAPI
404 SmmRelocateBases (
405 VOID
406 )
407 {
408 UINT8 BakBuf[BACK_BUF_SIZE];
409 SMRAM_SAVE_STATE_MAP BakBuf2;
410 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
411 UINT8 *U8Ptr;
412 UINT32 ApicId;
413 UINTN Index;
414 UINTN BspIndex;
415
416 //
417 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
418 //
419 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
420
421 //
422 // Patch ASM code template with current CR0, CR3, and CR4 values
423 //
424 mSmmCr0 = (UINT32)AsmReadCr0 ();
425 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
426 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
427 mSmmCr4 = (UINT32)AsmReadCr4 ();
428 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
429
430 //
431 // Patch GDTR for SMM base relocation
432 //
433 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
434 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
435
436 U8Ptr = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
437 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
438
439 //
440 // Backup original contents at address 0x38000
441 //
442 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
443 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
444
445 //
446 // Load image for relocation
447 //
448 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
449
450 //
451 // Retrieve the local APIC ID of current processor
452 //
453 ApicId = GetApicId ();
454
455 //
456 // Relocate SM bases for all APs
457 // This is the APs' first SMI: the rebase is done here, and each AP's default SMI handler is overridden by gcSmmInitTemplate
458 //
459 mIsBsp = FALSE;
460 BspIndex = (UINTN)-1;
461 for (Index = 0; Index < mNumberOfCpus; Index++) {
462 mRebased[Index] = FALSE;
463 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
464 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
465 //
466 // Wait for this AP to finish its 1st SMI
467 //
468 while (!mRebased[Index]) {
469 }
470 } else {
471 //
472 // BSP will be Relocated later
473 //
474 BspIndex = Index;
475 }
476 }
477
478 //
479 // Relocate BSP's SMM base
480 //
481 ASSERT (BspIndex != (UINTN)-1);
482 mIsBsp = TRUE;
483 SendSmiIpi (ApicId);
484 //
485 // Wait for the BSP to finish its 1st SMI
486 //
487 while (!mRebased[BspIndex]) {
488 }
489
490 //
491 // Restore contents at address 0x38000
492 //
493 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
494 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
495 }
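//
// For illustration: a worked example of the legacy addresses handled above,
// assuming the defaults used by this driver (SMM_DEFAULT_SMBASE = 0x30000,
// SMM_HANDLER_OFFSET = 0x8000, SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00).
//
//   U8Ptr       = 0x30000 + 0x8000 = 0x38000   // default SMI entry point
//   CpuStatePtr = 0x30000 + 0xFC00 = 0x3FC00   // default save state map
//
// This is why the backup/restore comments refer to address 0x38000: every CPU
// uses the power-on default SMBASE until its first SMI rebases it.
//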
496
497 /**
498 SMM Ready To Lock event notification handler.
499
500 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
501 perform additional lock actions that must be performed from SMM on the next SMI.
502
503 @param[in] Protocol Points to the protocol's unique identifier.
504 @param[in] Interface Points to the interface instance.
505 @param[in] Handle The handle on which the interface was installed.
506
507 @retval EFI_SUCCESS Notification handler runs successfully.
508 **/
509 EFI_STATUS
510 EFIAPI
511 SmmReadyToLockEventNotify (
512 IN CONST EFI_GUID *Protocol,
513 IN VOID *Interface,
514 IN EFI_HANDLE Handle
515 )
516 {
517 GetAcpiCpuData ();
518
519 //
520 // Cache a copy of UEFI memory map before we start profiling feature.
521 //
522 GetUefiMemoryMap ();
523
524 //
525 // Set SMM ready to lock flag and return
526 //
527 mSmmReadyToLock = TRUE;
528 return EFI_SUCCESS;
529 }
530
531 /**
532 The module Entry Point of the CPU SMM driver.
533
534 @param ImageHandle The firmware allocated handle for the EFI image.
535 @param SystemTable A pointer to the EFI System Table.
536
537 @retval EFI_SUCCESS The entry point is executed successfully.
538 @retval Other Some error occurs when executing this entry point.
539
540 **/
541 EFI_STATUS
542 EFIAPI
543 PiCpuSmmEntry (
544 IN EFI_HANDLE ImageHandle,
545 IN EFI_SYSTEM_TABLE *SystemTable
546 )
547 {
548 EFI_STATUS Status;
549 EFI_MP_SERVICES_PROTOCOL *MpServices;
550 UINTN NumberOfEnabledProcessors;
551 UINTN Index;
552 VOID *Buffer;
553 UINTN BufferPages;
554 UINTN TileCodeSize;
555 UINTN TileDataSize;
556 UINTN TileSize;
557 UINT8 *Stacks;
558 VOID *Registration;
559 UINT32 RegEax;
560 UINT32 RegEbx;
561 UINT32 RegEcx;
562 UINT32 RegEdx;
563 UINTN FamilyId;
564 UINTN ModelId;
565 UINT32 Cr3;
566
567 //
568 // Initialize address fixup
569 //
570 PiSmmCpuSmmInitFixupAddress ();
571 PiSmmCpuSmiEntryFixupAddress ();
572
573 //
574 // Initialize Debug Agent to support source level debug in SMM code
575 //
576 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
577
578 //
579 // Report the start of CPU SMM initialization.
580 //
581 REPORT_STATUS_CODE (
582 EFI_PROGRESS_CODE,
583 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
584 );
585
586 //
587 // Find out SMRR Base and SMRR Size
588 //
589 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
590
591 //
592 // Get MP Services Protocol
593 //
594 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
595 ASSERT_EFI_ERROR (Status);
596
597 //
598 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
599 //
600 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
601 ASSERT_EFI_ERROR (Status);
602 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
603
604 //
605 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
606 // A fixed BSP index makes no sense because the BSP may be hot removed.
607 //
608 DEBUG_CODE_BEGIN ();
609 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
610 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
611 }
612
613 DEBUG_CODE_END ();
614
615 //
616 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
617 //
618 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
619 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
620
621 //
622 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
623 // Make sure AddressEncMask is contained to smallest supported address field.
624 //
625 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
626 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
627
628 //
629 // If CPU hot plug is supported, allocate resources for processors that may be hot added
630 //
631 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
632 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
633 } else {
634 mMaxNumberOfCpus = mNumberOfCpus;
635 }
636
637 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
638
639 //
640 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
641 // allocated buffer. The minimum size of this buffer for a uniprocessor system
642 // is 32 KB, because the entry point is at SMBASE + 32KB and the CPU save state
643 // area is just below SMBASE + 64KB. If more than one CPU is present in the platform,
644 // then the SMI entry point and the CPU save state areas can be tiled to minimize
645 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
646 // by adding the CPU save state size, any extra CPU specific context, and
647 // the size of code that must be placed at the SMI entry point to transfer
648 // control to a C function in the native SMM execution mode. This size is
649 // rounded up to the nearest power of 2 to give the tile size for each CPU.
650 // The total amount of memory required is the maximum number of CPUs that the
651 // platform supports times the tile size. The picture below shows the tiling,
652 // where m is the number of tiles that fit in 32 KB.
653 //
654 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
655 // | CPU m+1 Save State |
656 // +-----------------------------+
657 // | CPU m+1 Extra Data |
658 // +-----------------------------+
659 // | Padding |
660 // +-----------------------------+
661 // | CPU 2m SMI Entry |
662 // +#############################+ <-- Base of allocated buffer + 64 KB
663 // | CPU m-1 Save State |
664 // +-----------------------------+
665 // | CPU m-1 Extra Data |
666 // +-----------------------------+
667 // | Padding |
668 // +-----------------------------+
669 // | CPU 2m-1 SMI Entry |
670 // +=============================+ <-- 2^n offset from Base of allocated buffer
671 // | . . . . . . . . . . . . |
672 // +=============================+ <-- 2^n offset from Base of allocated buffer
673 // | CPU 2 Save State |
674 // +-----------------------------+
675 // | CPU 2 Extra Data |
676 // +-----------------------------+
677 // | Padding |
678 // +-----------------------------+
679 // | CPU m+1 SMI Entry |
680 // +=============================+ <-- Base of allocated buffer + 32 KB
681 // | CPU 1 Save State |
682 // +-----------------------------+
683 // | CPU 1 Extra Data |
684 // +-----------------------------+
685 // | Padding |
686 // +-----------------------------+
687 // | CPU m SMI Entry |
688 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
689 // | CPU 0 Save State |
690 // +-----------------------------+
691 // | CPU 0 Extra Data |
692 // +-----------------------------+
693 // | Padding |
694 // +-----------------------------+
695 // | CPU m-1 SMI Entry |
696 // +=============================+ <-- 2^n offset from Base of allocated buffer
697 // | . . . . . . . . . . . . |
698 // +=============================+ <-- 2^n offset from Base of allocated buffer
699 // | Padding |
700 // +-----------------------------+
701 // | CPU 1 SMI Entry |
702 // +=============================+ <-- 2^n offset from Base of allocated buffer
703 // | Padding |
704 // +-----------------------------+
705 // | CPU 0 SMI Entry |
706 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
707 //
708
709 //
710 // Retrieve CPU Family
711 //
712 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
713 FamilyId = (RegEax >> 8) & 0xf;
714 ModelId = (RegEax >> 4) & 0xf;
715 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
716 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
717 }
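//
// For illustration: a worked example of the family/model decode above, using an
// assumed CPUID.01H:EAX signature of 0x000306A9.
//
//   FamilyId = (0x000306A9 >> 8) & 0xF  = 0x6
//   ModelId  = (0x000306A9 >> 4) & 0xF  = 0xA
//   FamilyId is 0x06, so the extended model bits are folded in:
//   ModelId  = 0xA | ((0x000306A9 >> 12) & 0xF0) = 0xA | 0x30 = 0x3A
//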
718
719 RegEdx = 0;
720 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
721 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
722 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
723 }
724
725 //
726 // Determine the mode of the CPU at the time an SMI occurs
727 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
728 // Volume 3C, Section 34.4.1.1
729 //
730 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
731 if ((RegEdx & BIT29) != 0) {
732 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
733 }
734
735 if (FamilyId == 0x06) {
736 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
737 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
738 }
739 }
740
741 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
742 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
743 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
744 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
745 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
746 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
747 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
748 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
749 if ((RegEcx & CPUID_CET_SS) == 0) {
750 mCetSupported = FALSE;
751 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
752 }
753
754 if (mCetSupported) {
755 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
756 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
757 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
758 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
759 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
760 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
761 }
762 } else {
763 mCetSupported = FALSE;
764 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
765 }
766 } else {
767 mCetSupported = FALSE;
768 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
769 }
770
771 //
772 // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map, the extra CPU
773 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
774 // This size is rounded up to the nearest power of 2.
775 //
776 TileCodeSize = GetSmiHandlerSize ();
777 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
778 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
779 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
780 TileSize = TileDataSize + TileCodeSize - 1;
781 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
782 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
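//
// For illustration: a worked example of the rounding above, with assumed sizes.
// If TileCodeSize and TileDataSize each align up to 0x1000:
//
//   TileSize = 0x1000 + 0x1000 - 1          = 0x1FFF
//   TileSize = 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000 = 0x2000
//
// i.e. the code/data pair is rounded up to the next power of two (8 KB here),
// which keeps every CPU's tile at a 2^n offset within the allocated buffer.
//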
783
784 //
785 // If the TileSize is larger than space available for the SMI Handler of
786 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
787 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
788 // the SMI Handler size must be reduced or the size of the extra CPU specific
789 // context must be reduced.
790 //
791 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
792
793 //
794 // Allocate buffer for all of the tiles.
795 //
796 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
797 // Volume 3C, Section 34.11 SMBASE Relocation
798 // For Pentium and Intel486 processors, the SMBASE values must be
799 // aligned on a 32-KByte boundary or the processor will enter shutdown
800 // state during the execution of a RSM instruction.
801 //
802 // Intel486 processors: FamilyId is 4
803 // Pentium processors : FamilyId is 5
804 //
805 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
806 if ((FamilyId == 4) || (FamilyId == 5)) {
807 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
808 } else {
809 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
810 }
811
812 ASSERT (Buffer != NULL);
813 DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));
814
815 //
816 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
817 //
818 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
819 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
820
821 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
822 ASSERT (gSmmCpuPrivate->Operation != NULL);
823
824 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
825 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
826
827 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
828 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
829
830 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
831 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
832
833 //
834 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
835 //
836 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
837 ASSERT (mCpuHotPlugData.ApicId != NULL);
838 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
839 ASSERT (mCpuHotPlugData.SmBase != NULL);
840 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
841
842 //
843 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
844 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
845 // size for each CPU in the platform
846 //
847 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
848 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
849 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
850 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
851 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
852
853 if (Index < mNumberOfCpus) {
854 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
855 ASSERT_EFI_ERROR (Status);
856 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
857
858 DEBUG ((
859 DEBUG_INFO,
860 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
861 Index,
862 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
863 mCpuHotPlugData.SmBase[Index],
864 gSmmCpuPrivate->CpuSaveState[Index],
865 gSmmCpuPrivate->CpuSaveStateSize[Index]
866 ));
867 } else {
868 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
869 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
870 }
871 }
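//
// For illustration: a worked example of the SMBASE arithmetic above, assuming
// SMM_HANDLER_OFFSET = 0x8000 and SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00.
//
//   SmBase[0]        = Buffer + 0 * TileSize - 0x8000
//   SMI entry point  = SmBase[0] + 0x8000 = Buffer            // start of buffer
//   Save state map   = SmBase[0] + 0xFC00 = Buffer + 0x7C00
//
// Each subsequent CPU is offset by one TileSize, so CPU[i]'s SMI entry lands at
// Buffer + i * TileSize, matching the tiling picture earlier in this function.
//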
872
873 //
874 // Allocate SMI stacks for all processors.
875 //
876 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
877 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
878 //
879 // SMM Stack Guard Enabled
880 // 2 more pages are allocated for each processor: one is the guard page and the other is the known good stack.
881 //
882 // +--------------------------------------------------+-----+--------------------------------------------------+
883 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
884 // +--------------------------------------------------+-----+--------------------------------------------------+
885 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
886 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
887 // | | | |
888 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
889 //
890 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
891 }
892
893 mSmmShadowStackSize = 0;
894 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
895 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
896
897 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
898 //
899 // SMM Stack Guard Enabled
900 // Append Shadow Stack after normal stack
901 // 2 more pages are allocated for each processor: one is the guard page and the other is the known good shadow stack.
902 //
903 // |= Stacks
904 // +--------------------------------------------------+---------------------------------------------------------------+
905 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
906 // +--------------------------------------------------+---------------------------------------------------------------+
907 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
908 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
909 // | |
910 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
911 //
912 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
913 } else {
914 //
915 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
916 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
917 // 1 more page is allocated for each processor; it is the known good stack.
918 //
919 //
920 // |= Stacks
921 // +-------------------------------------+--------------------------------------------------+
922 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
923 // +-------------------------------------+--------------------------------------------------+
924 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
925 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
926 // | |
927 // |<-------------------------------- Processor N ----------------------------------------->|
928 //
929 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
930 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
931 }
932 }
933
934 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
935 ASSERT (Stacks != NULL);
936 mSmmStackArrayBase = (UINTN)Stacks;
937 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
938
939 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
940 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
941 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
942 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
943 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
944 }
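//
// For illustration: a sketch of the stack array layout above, with assumed
// values of 2 CPUs, mSmmStackSize = 0x5000, and mSmmShadowStackSize = 0.
//
//   CPU 0 stack block: [Stacks,          Stacks + 0x5000)
//   CPU 1 stack block: [Stacks + 0x5000, Stacks + 0xA000)
//   mSmmStackArrayEnd  = Stacks + 2 * 0x5000 - 1
//
// gPatchSmmInitStack (patched just below) receives the top of the first stack
// block, Stacks + mSmmStackSize - sizeof (UINTN), which is the stack used while
// the SMM bases are being relocated.
//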
945
946 //
947 // Set SMI stack for SMM base relocation
948 //
949 PatchInstructionX86 (
950 gPatchSmmInitStack,
951 (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
952 sizeof (UINTN)
953 );
954
955 //
956 // Initialize IDT
957 //
958 InitializeSmmIdt ();
959
960 //
961 // Relocate SMM Base addresses to the ones allocated from SMRAM
962 //
963 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
964 ASSERT (mRebased != NULL);
965 SmmRelocateBases ();
966
967 //
968 // Call hook for BSP to perform extra actions in normal mode after all
969 // SMM base addresses have been relocated on all CPUs
970 //
971 SmmCpuFeaturesSmmRelocationComplete ();
972
973 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
974
975 //
976 // SMM Time initialization
977 //
978 InitializeSmmTimer ();
979
980 //
981 // Initialize MP globals
982 //
983 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
984
985 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
986 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
987 SetShadowStack (
988 Cr3,
989 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
990 mSmmShadowStackSize
991 );
992 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
993 SetNotPresentPage (
994 Cr3,
995 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
996 EFI_PAGES_TO_SIZE (1)
997 );
998 }
999 }
1000 }
1001
1002 //
1003 // Fill in SMM Reserved Regions
1004 //
1005 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1006 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1007
1008 //
1009 // Install the SMM Configuration Protocol onto a new handle on the handle database.
1010 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1011 // to an SMRAM address will be present in the handle database
1012 //
1013 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1014 &gSmmCpuPrivate->SmmCpuHandle,
1015 &gEfiSmmConfigurationProtocolGuid,
1016 &gSmmCpuPrivate->SmmConfiguration,
1017 NULL
1018 );
1019 ASSERT_EFI_ERROR (Status);
1020
1021 //
1022 // Install the SMM CPU Protocol into SMM protocol database
1023 //
1024 Status = gSmst->SmmInstallProtocolInterface (
1025 &mSmmCpuHandle,
1026 &gEfiSmmCpuProtocolGuid,
1027 EFI_NATIVE_INTERFACE,
1028 &mSmmCpu
1029 );
1030 ASSERT_EFI_ERROR (Status);
1031
1032 //
1033 // Install the SMM Memory Attribute Protocol into SMM protocol database
1034 //
1035 Status = gSmst->SmmInstallProtocolInterface (
1036 &mSmmCpuHandle,
1037 &gEdkiiSmmMemoryAttributeProtocolGuid,
1038 EFI_NATIVE_INTERFACE,
1039 &mSmmMemoryAttribute
1040 );
1041 ASSERT_EFI_ERROR (Status);
1042
1043 //
1044 // Initialize global buffer for MM MP.
1045 //
1046 InitializeDataForMmMp ();
1047
1048 //
1049 // Initialize Package First Thread Index Info.
1050 //
1051 InitPackageFirstThreadIndexInfo ();
1052
1053 //
1054 // Install the SMM Mp Protocol into SMM protocol database
1055 //
1056 Status = gSmst->SmmInstallProtocolInterface (
1057 &mSmmCpuHandle,
1058 &gEfiMmMpProtocolGuid,
1059 EFI_NATIVE_INTERFACE,
1060 &mSmmMp
1061 );
1062 ASSERT_EFI_ERROR (Status);
1063
1064 //
1065 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1066 //
1067 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1068 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1069 ASSERT_EFI_ERROR (Status);
1070 }
1071
1072 //
1073 // Initialize SMM CPU Services Support
1074 //
1075 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1076 ASSERT_EFI_ERROR (Status);
1077
1078 //
1079 // register SMM Ready To Lock Protocol notification
1080 //
1081 Status = gSmst->SmmRegisterProtocolNotify (
1082 &gEfiSmmReadyToLockProtocolGuid,
1083 SmmReadyToLockEventNotify,
1084 &Registration
1085 );
1086 ASSERT_EFI_ERROR (Status);
1087
1088 //
1089 // Initialize SMM Profile feature
1090 //
1091 InitSmmProfile (Cr3);
1092
1093 GetAcpiS3EnableFlag ();
1094 InitSmmS3ResumeState (Cr3);
1095
1096 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1097
1098 return EFI_SUCCESS;
1099 }
1100
1101 /**
1102
1103 Find out SMRAM information including SMRR base and SMRR size.
1104
1105 @param SmrrBase SMRR base
1106 @param SmrrSize SMRR size
1107
1108 **/
1109 VOID
1110 FindSmramInfo (
1111 OUT UINT32 *SmrrBase,
1112 OUT UINT32 *SmrrSize
1113 )
1114 {
1115 EFI_STATUS Status;
1116 UINTN Size;
1117 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1118 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1119 UINTN Index;
1120 UINT64 MaxSize;
1121 BOOLEAN Found;
1122
1123 //
1124 // Get SMM Access Protocol
1125 //
1126 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1127 ASSERT_EFI_ERROR (Status);
1128
1129 //
1130 // Get SMRAM information
1131 //
1132 Size = 0;
1133 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1134 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1135
1136 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1137 ASSERT (mSmmCpuSmramRanges != NULL);
1138
1139 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1140 ASSERT_EFI_ERROR (Status);
1141
1142 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1143
1144 //
1145 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1146 //
1147 CurrentSmramRange = NULL;
1148 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1149 //
1150 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1151 //
1152 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1153 continue;
1154 }
1155
1156 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1157 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1158 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1159 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1160 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1161 }
1162 }
1163 }
1164 }
1165
1166 ASSERT (CurrentSmramRange != NULL);
1167
1168 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1169 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1170
1171 do {
1172 Found = FALSE;
1173 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1174 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
1175 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
1176 {
1177 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1178 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1179 Found = TRUE;
1180 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
1181 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1182 Found = TRUE;
1183 }
1184 }
1185 } while (Found);
1186
1187 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1188 }
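//
// For illustration: a worked example of the range-merging loop above, using two
// assumed, contiguous and usable SMRAM descriptors.
//
//   Range A: CpuStart = 0x7F000000, PhysicalSize = 0x100000
//   Range B: CpuStart = 0x7F100000, PhysicalSize = 0x700000
//
// Range B is selected first as the largest range, then the loop finds Range A
// ending exactly at *SmrrBase and extends the result downward:
//
//   *SmrrBase = 0x7F000000, *SmrrSize = 0x800000
//
// The loop repeats until no adjacent descriptor remains, so the SMRR covers the
// whole contiguous SMRAM block.
//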
1189
1190 /**
1191 Configure SMM Code Access Check feature on an AP.
1192 SMM Feature Control MSR will be locked after configuration.
1193
1194 @param[in,out] Buffer Pointer to private data buffer.
1195 **/
1196 VOID
1197 EFIAPI
1198 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1199 IN OUT VOID *Buffer
1200 )
1201 {
1202 UINTN CpuIndex;
1203 UINT64 SmmFeatureControlMsr;
1204 UINT64 NewSmmFeatureControlMsr;
1205
1206 //
1207 // Retrieve the CPU Index from the context passed in
1208 //
1209 CpuIndex = *(UINTN *)Buffer;
1210
1211 //
1212 // Get the current SMM Feature Control MSR value
1213 //
1214 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1215
1216 //
1217 // Compute the new SMM Feature Control MSR value
1218 //
1219 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1220 if (mSmmCodeAccessCheckEnable) {
1221 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1222 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1223 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1224 }
1225 }
1226
1227 //
1228 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1229 //
1230 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1231 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1232 }
1233
1234 //
1235 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1236 //
1237 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1238 }
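//
// For illustration: a worked example of the MSR update above, assuming the bit
// definitions used by this driver (SMM_FEATURE_CONTROL_LOCK_BIT = BIT0,
// SMM_CODE_CHK_EN_BIT = BIT2) and an initial MSR value of 0.
//
//   mSmmCodeAccessCheckEnable == TRUE, PcdCpuSmmFeatureControlMsrLock == TRUE
//   NewSmmFeatureControlMsr = 0 | BIT2 | BIT0 = 0x5
//
// Because 0x5 differs from the current value, the register is written once and
// then left locked against further modification.
//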
1239
1240 /**
1241 Configure SMM Code Access Check feature for all processors.
1242 SMM Feature Control MSR will be locked after configuration.
1243 **/
1244 VOID
1245 ConfigSmmCodeAccessCheck (
1246 VOID
1247 )
1248 {
1249 UINTN Index;
1250 EFI_STATUS Status;
1251
1252 //
1253 // Check to see if the Feature Control MSR is supported on this CPU
1254 //
1255 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1256 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1257 mSmmCodeAccessCheckEnable = FALSE;
1258 return;
1259 }
1260
1261 //
1262 // Check to see if the CPU supports the SMM Code Access Check feature
1263 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1264 //
1265 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1266 mSmmCodeAccessCheckEnable = FALSE;
1267 return;
1268 }
1269
1270 //
1271 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1272 //
1273 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1274
1275 //
1276 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1277 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1278 //
1279 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1280
1281 //
1282 // Enable SMM Code Access Check feature on the BSP.
1283 //
1284 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1285
1286 //
1287 // Enable SMM Code Access Check feature for the APs.
1288 //
1289 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1290 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1291 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1292 //
1293 // If this processor does not exist
1294 //
1295 continue;
1296 }
1297
1298 //
1299 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1300 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1301 //
1302 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1303
1304 //
1305 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1306 //
1307 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1308 ASSERT_EFI_ERROR (Status);
1309
1310 //
1311 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1312 //
1313 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1314 CpuPause ();
1315 }
1316
1317 //
1318 // Release the Config SMM Code Access Check spin lock.
1319 //
1320 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1321 }
1322 }
1323 }
1324
1325 /**
1326 Allocate pages for code.
1327
1328 @param[in] Pages Number of pages to be allocated.
1329
1330 @return Allocated memory.
1331 **/
1332 VOID *
1333 AllocateCodePages (
1334 IN UINTN Pages
1335 )
1336 {
1337 EFI_STATUS Status;
1338 EFI_PHYSICAL_ADDRESS Memory;
1339
1340 if (Pages == 0) {
1341 return NULL;
1342 }
1343
1344 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1345 if (EFI_ERROR (Status)) {
1346 return NULL;
1347 }
1348
1349 return (VOID *)(UINTN)Memory;
1350 }
1351
1352 /**
1353 Allocate aligned pages for code.
1354
1355 @param[in] Pages Number of pages to be allocated.
1356 @param[in] Alignment The requested alignment of the allocation.
1357 Must be a power of two.
1358 If Alignment is zero, then byte alignment is used.
1359
1360 @return Allocated memory.
1361 **/
1362 VOID *
1363 AllocateAlignedCodePages (
1364 IN UINTN Pages,
1365 IN UINTN Alignment
1366 )
1367 {
1368 EFI_STATUS Status;
1369 EFI_PHYSICAL_ADDRESS Memory;
1370 UINTN AlignedMemory;
1371 UINTN AlignmentMask;
1372 UINTN UnalignedPages;
1373 UINTN RealPages;
1374
1375 //
1376 // Alignment must be a power of two or zero.
1377 //
1378 ASSERT ((Alignment & (Alignment - 1)) == 0);
1379
1380 if (Pages == 0) {
1381 return NULL;
1382 }
1383
1384 if (Alignment > EFI_PAGE_SIZE) {
1385 //
1386 // Calculate the total number of pages since alignment is larger than page size.
1387 //
1388 AlignmentMask = Alignment - 1;
1389 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1390 //
1391 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1392 //
1393 ASSERT (RealPages > Pages);
1394
1395 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1396 if (EFI_ERROR (Status)) {
1397 return NULL;
1398 }
1399
1400 AlignedMemory = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
1401 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
1402 if (UnalignedPages > 0) {
1403 //
1404 // Free first unaligned page(s).
1405 //
1406 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1407 ASSERT_EFI_ERROR (Status);
1408 }
1409
1410 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1411 UnalignedPages = RealPages - Pages - UnalignedPages;
1412 if (UnalignedPages > 0) {
1413 //
1414 // Free last unaligned page(s).
1415 //
1416 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1417 ASSERT_EFI_ERROR (Status);
1418 }
1419 } else {
1420 //
1421 // Do not over-allocate pages in this case.
1422 //
1423 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1424 if (EFI_ERROR (Status)) {
1425 return NULL;
1426 }
1427
1428 AlignedMemory = (UINTN)Memory;
1429 }
1430
1431 return (VOID *)AlignedMemory;
1432 }
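//
// For illustration: a worked example of the over-allocate-and-trim logic above,
// with assumed values Pages = 2, Alignment = SIZE_32KB, and an allocation that
// comes back at 0x7F001000.
//
//   RealPages     = 2 + EFI_SIZE_TO_PAGES (0x8000)   = 10 pages
//   AlignedMemory = (0x7F001000 + 0x7FFF) & ~0x7FFF  = 0x7F008000
//   Head trim     = EFI_SIZE_TO_PAGES (0x7000)       = 7 pages freed
//   Tail trim     = 10 - 2 - 7                       = 1 page freed
//
// Exactly the 2 requested pages remain, aligned on a 32 KB boundary.
//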
1433
1434 /**
1435 Perform the remaining tasks.
1436
1437 **/
1438 VOID
1439 PerformRemainingTasks (
1440 VOID
1441 )
1442 {
1443 if (mSmmReadyToLock) {
1444 //
1445 // Start SMM Profile feature
1446 //
1447 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1448 SmmProfileStart ();
1449 }
1450
1451 //
1452 // Create a page table with a mix of 2MB and 4KB pages. Mark some memory ranges as not-present and execute-disable.
1453 //
1454 InitPaging ();
1455
1456 //
1457 // Mark critical region to be read-only in page table
1458 //
1459 SetMemMapAttributes ();
1460
1461 if (IsRestrictedMemoryAccess ()) {
1462 //
1463 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1464 //
1465 SetUefiMemMapAttributes ();
1466
1467 //
1468 // Set page table itself to be read-only
1469 //
1470 SetPageTableAttributes ();
1471 }
1472
1473 //
1474 // Configure SMM Code Access Check feature if available.
1475 //
1476 ConfigSmmCodeAccessCheck ();
1477
1478 SmmCpuFeaturesCompleteSmmReadyToLock ();
1479
1480 //
1481 // Clean SMM ready to lock flag
1482 //
1483 mSmmReadyToLock = FALSE;
1484 }
1485 }
1486
1487 /**
1488 Perform the pre tasks.
1489
1490 **/
1491 VOID
1492 PerformPreTasks (
1493 VOID
1494 )
1495 {
1496 RestoreSmmConfigurationInS3 ();
1497 }