1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // SMM CPU Private Data structure that contains SMM Configuration Protocol
15 // along with its supporting fields.
16 //
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
19 NULL, // SmmCpuHandle
20 NULL, // Pointer to ProcessorInfo array
21 NULL, // Pointer to Operation array
22 NULL, // Pointer to CpuSaveStateSize array
23 NULL, // Pointer to CpuSaveState array
24 { {0} }, // SmmReservedSmramRegion
25 {
26 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
27 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
28 0, // SmmCoreEntryContext.NumberOfCpus
29 NULL, // SmmCoreEntryContext.CpuSaveStateSize
30 NULL // SmmCoreEntryContext.CpuSaveState
31 },
32 NULL, // SmmCoreEntry
33 {
34 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
35 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
36 },
37 NULL, // pointer to Ap Wrapper Func array
38 {NULL, NULL}, // List_Entry for Tokens.
39 };
40
41 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
42 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
43 0, // Array Length of SmBase and APIC ID
44 NULL, // Pointer to APIC ID array
45 NULL, // Pointer to SMBASE array
46 0, // Reserved
47 0, // SmrrBase
48 0 // SmrrSize
49 };
50
51 //
52 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
53 //
54 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
55
56 //
57 // SMM Relocation variables
58 //
59 volatile BOOLEAN *mRebased;
60 volatile BOOLEAN mIsBsp;
61
62 ///
63 /// Handle for the SMM CPU Protocol
64 ///
65 EFI_HANDLE mSmmCpuHandle = NULL;
66
67 ///
68 /// SMM CPU Protocol instance
69 ///
70 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
71 SmmReadSaveState,
72 SmmWriteSaveState
73 };
74
75 ///
76 /// SMM Memory Attribute Protocol instance
77 ///
78 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
79 EdkiiSmmGetMemoryAttributes,
80 EdkiiSmmSetMemoryAttributes,
81 EdkiiSmmClearMemoryAttributes
82 };
83
84 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
85
86 //
87 // SMM stack information
88 //
89 UINTN mSmmStackArrayBase;
90 UINTN mSmmStackArrayEnd;
91 UINTN mSmmStackSize;
92
93 UINTN mSmmShadowStackSize;
94 BOOLEAN mCetSupported = TRUE;
95
96 UINTN mMaxNumberOfCpus = 1;
97 UINTN mNumberOfCpus = 1;
98
99 //
100 // SMM ready to lock flag
101 //
102 BOOLEAN mSmmReadyToLock = FALSE;
103
104 //
105 // Global used to cache PCD for SMM Code Access Check enable
106 //
107 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
108
109 //
110 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
111 //
112 UINT64 mAddressEncMask = 0;
113
114 //
115 // Spin lock used to serialize setting of SMM Code Access Check feature
116 //
117 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
118
119 //
120 // Saved SMM ranges information
121 //
122 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
123 UINTN mSmmCpuSmramRangeCount;
124
125 UINT8 mPhysicalAddressBits;
126
127 //
128 // Control register contents saved for SMM S3 resume state initialization.
129 //
130 UINT32 mSmmCr0;
131 UINT32 mSmmCr4;
132
133 /**
134 Initialize IDT to setup exception handlers for SMM.
135
136 **/
137 VOID
138 InitializeSmmIdt (
139 VOID
140 )
141 {
142 EFI_STATUS Status;
143 BOOLEAN InterruptState;
144 IA32_DESCRIPTOR DxeIdtr;
145
146 //
147 // There are 32 (not 255) entries in it since only processor
148 // generated exceptions will be handled.
149 //
150 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
151 //
152 // Allocate page aligned IDT, because it might be set as read only.
153 //
154 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
155 ASSERT (gcSmiIdtr.Base != 0);
156 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
157
158 //
159 // Disable Interrupt and save DXE IDT table
160 //
161 InterruptState = SaveAndDisableInterrupts ();
162 AsmReadIdtr (&DxeIdtr);
163 //
164 // Load SMM temporary IDT table
165 //
166 AsmWriteIdtr (&gcSmiIdtr);
167 //
168 // Setup SMM default exception handlers, SMM IDT table
169 // will be updated and saved in gcSmiIdtr
170 //
171 Status = InitializeCpuExceptionHandlers (NULL);
172 ASSERT_EFI_ERROR (Status);
173 //
174 // Restore DXE IDT table and CPU interrupt
175 //
176 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
177 SetInterruptState (InterruptState);
178 }
179
180 /**
181 Search module name by input IP address and output it.
182
183 @param CallerIpAddress Caller instruction pointer.
184
185 **/
186 VOID
187 DumpModuleInfoByIp (
188 IN UINTN CallerIpAddress
189 )
190 {
191 UINTN Pe32Data;
192 VOID *PdbPointer;
193
194 //
195 // Find Image Base
196 //
197 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
198 if (Pe32Data != 0) {
199 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
200 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
201 if (PdbPointer != NULL) {
202 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
203 }
204 }
205 }
206
207 /**
208 Read information from the CPU save state.
209
210 @param This EFI_SMM_CPU_PROTOCOL instance
211 @param Width The number of bytes to read from the CPU save state.
212 @param Register Specifies the CPU register to read from the save state.
213 @param CpuIndex Specifies the zero-based index of the CPU save state.
214 @param Buffer Upon return, this holds the CPU register value read from the save state.
215
216 @retval EFI_SUCCESS The register was read from Save State
217 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
218 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
219
220 **/
221 EFI_STATUS
222 EFIAPI
223 SmmReadSaveState (
224 IN CONST EFI_SMM_CPU_PROTOCOL *This,
225 IN UINTN Width,
226 IN EFI_SMM_SAVE_STATE_REGISTER Register,
227 IN UINTN CpuIndex,
228 OUT VOID *Buffer
229 )
230 {
231 EFI_STATUS Status;
232
233 //
234 // Retrieve pointer to the specified CPU's SMM Save State buffer
235 //
236 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
237 return EFI_INVALID_PARAMETER;
238 }
239 //
240 // The SpeculationBarrier() call here is to ensure the above check for the
241 // CpuIndex has been completed before the execution of subsequent code.
242 //
243 SpeculationBarrier ();
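//
// (Without this barrier, Buffer and the per-CPU save state could be accessed
// speculatively with an out-of-range CpuIndex, i.e. a bounds check bypass.)
//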
244
245 //
246 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
247 //
248 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
249 //
250 // The pseudo-register only supports the 64-bit size specified by Width.
251 //
252 if (Width != sizeof (UINT64)) {
253 return EFI_INVALID_PARAMETER;
254 }
255 //
256 // If the processor is in SMM at the time the SMI occurred,
257 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
258 // Otherwise, EFI_NOT_FOUND is returned.
259 //
260 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
261 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
262 return EFI_SUCCESS;
263 } else {
264 return EFI_NOT_FOUND;
265 }
266 }
267
268 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
269 return EFI_INVALID_PARAMETER;
270 }
271
272 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
273 if (Status == EFI_UNSUPPORTED) {
274 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
275 }
276 return Status;
277 }
278
279 /**
280 Write data to the CPU save state.
281
282 @param This EFI_SMM_CPU_PROTOCOL instance
283 @param Width The number of bytes to write to the CPU save state.
284 @param Register Specifies the CPU register to write to the save state.
285 @param CpuIndex Specifies the zero-based index of the CPU save state
286 @param Buffer Upon entry, this holds the new CPU register value.
287
288 @retval EFI_SUCCESS The register was written to Save State
289 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
290 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
291
292 **/
293 EFI_STATUS
294 EFIAPI
295 SmmWriteSaveState (
296 IN CONST EFI_SMM_CPU_PROTOCOL *This,
297 IN UINTN Width,
298 IN EFI_SMM_SAVE_STATE_REGISTER Register,
299 IN UINTN CpuIndex,
300 IN CONST VOID *Buffer
301 )
302 {
303 EFI_STATUS Status;
304
305 //
306 // Retrieve pointer to the specified CPU's SMM Save State buffer
307 //
308 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
309 return EFI_INVALID_PARAMETER;
310 }
311
312 //
313 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
314 //
315 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
316 return EFI_SUCCESS;
317 }
318
319 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
320 return EFI_INVALID_PARAMETER;
321 }
322
323 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
324 if (Status == EFI_UNSUPPORTED) {
325 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
326 }
327 return Status;
328 }
329
330
331 /**
332 C function for the SMI handler, used to change all processors' SMBASE registers.
333
334 **/
335 VOID
336 EFIAPI
337 SmmInitHandler (
338 VOID
339 )
340 {
341 UINT32 ApicId;
342 UINTN Index;
343
344 //
345 // Update SMM IDT entries' code segment and load IDT
346 //
347 AsmWriteIdtr (&gcSmiIdtr);
348 ApicId = GetApicId ();
349
350 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
351
352 for (Index = 0; Index < mNumberOfCpus; Index++) {
353 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
354 //
355 // Initialize SMM specific features on the currently executing CPU
356 //
357 SmmCpuFeaturesInitializeProcessor (
358 Index,
359 mIsBsp,
360 gSmmCpuPrivate->ProcessorInfo,
361 &mCpuHotPlugData
362 );
363
364 if (!mSmmS3Flag) {
365 //
366 // Check XD and BTS features on each processor on normal boot
367 //
368 CheckFeatureSupported ();
369 }
370
371 if (mIsBsp) {
372 //
373 // BSP rebase is already done above.
374 // Initialize private data during S3 resume
375 //
376 InitializeMpSyncData ();
377 }
378
379 //
380 // Hook return after RSM to set SMM re-based flag
381 //
382 SemaphoreHook (Index, &mRebased[Index]);
383
384 return;
385 }
386 }
387 ASSERT (FALSE);
388 }
389
390 /**
391 Relocate SmmBases for each processor.
392
393 Executed on the first boot and on all S3 resumes
394
395 **/
396 VOID
397 EFIAPI
398 SmmRelocateBases (
399 VOID
400 )
401 {
402 UINT8 BakBuf[BACK_BUF_SIZE];
403 SMRAM_SAVE_STATE_MAP BakBuf2;
404 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
405 UINT8 *U8Ptr;
406 UINT32 ApicId;
407 UINTN Index;
408 UINTN BspIndex;
409
410 //
411 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
412 //
413 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
414
415 //
416 // Patch ASM code template with current CR0, CR3, and CR4 values
417 //
418 mSmmCr0 = (UINT32)AsmReadCr0 ();
419 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
420 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
421 mSmmCr4 = (UINT32)AsmReadCr4 ();
422 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
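//
// CR4_CET_ENABLE is masked out of the patched CR4 value, so the SMBASE
// relocation SMI executes with CET disabled; SMM shadow stacks are only
// configured later in PiCpuSmmEntry(), after InitializeMpServiceData().
//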
423
424 //
425 // Patch GDTR for SMM base relocation
426 //
427 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
428 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
429
430 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
431 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
432
433 //
434 // Backup original contents at address 0x38000
435 //
436 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
437 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
438
439 //
440 // Load image for relocation
441 //
442 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
443
444 //
445 // Retrieve the local APIC ID of current processor
446 //
447 ApicId = GetApicId ();
448
449 //
450 // Relocate SM bases for all APs
451 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
452 //
453 mIsBsp = FALSE;
454 BspIndex = (UINTN)-1;
455 for (Index = 0; Index < mNumberOfCpus; Index++) {
456 mRebased[Index] = FALSE;
457 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
458 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
459 //
460 // Wait for this AP to finish its 1st SMI
461 //
462 while (!mRebased[Index]);
463 } else {
464 //
465 // BSP will be Relocated later
466 //
467 BspIndex = Index;
468 }
469 }
470
471 //
472 // Relocate BSP's SMM base
473 //
474 ASSERT (BspIndex != (UINTN)-1);
475 mIsBsp = TRUE;
476 SendSmiIpi (ApicId);
477 //
478 // Wait for the BSP to finish its 1st SMI
479 //
480 while (!mRebased[BspIndex]);
481
482 //
483 // Restore contents at address 0x38000
484 //
485 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
486 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
487 }
488
489 /**
490 SMM Ready To Lock event notification handler.
491
492 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
493 perform additional lock actions that must be performed from SMM on the next SMI.
494
495 @param[in] Protocol Points to the protocol's unique identifier.
496 @param[in] Interface Points to the interface instance.
497 @param[in] Handle The handle on which the interface was installed.
498
499 @retval EFI_SUCCESS Notification handler runs successfully.
500 **/
501 EFI_STATUS
502 EFIAPI
503 SmmReadyToLockEventNotify (
504 IN CONST EFI_GUID *Protocol,
505 IN VOID *Interface,
506 IN EFI_HANDLE Handle
507 )
508 {
509 GetAcpiCpuData ();
510
511 //
512 // Cache a copy of UEFI memory map before we start profiling feature.
513 //
514 GetUefiMemoryMap ();
515
516 //
517 // Set SMM ready to lock flag and return
518 //
519 mSmmReadyToLock = TRUE;
520 return EFI_SUCCESS;
521 }
522
523 /**
524 The module Entry Point of the CPU SMM driver.
525
526 @param ImageHandle The firmware allocated handle for the EFI image.
527 @param SystemTable A pointer to the EFI System Table.
528
529 @retval EFI_SUCCESS The entry point is executed successfully.
530 @retval Other Some error occurs when executing this entry point.
531
532 **/
533 EFI_STATUS
534 EFIAPI
535 PiCpuSmmEntry (
536 IN EFI_HANDLE ImageHandle,
537 IN EFI_SYSTEM_TABLE *SystemTable
538 )
539 {
540 EFI_STATUS Status;
541 EFI_MP_SERVICES_PROTOCOL *MpServices;
542 UINTN NumberOfEnabledProcessors;
543 UINTN Index;
544 VOID *Buffer;
545 UINTN BufferPages;
546 UINTN TileCodeSize;
547 UINTN TileDataSize;
548 UINTN TileSize;
549 UINT8 *Stacks;
550 VOID *Registration;
551 UINT32 RegEax;
552 UINT32 RegEbx;
553 UINT32 RegEcx;
554 UINT32 RegEdx;
555 UINTN FamilyId;
556 UINTN ModelId;
557 UINT32 Cr3;
558
559 //
560 // Initialize address fixup
561 //
562 PiSmmCpuSmmInitFixupAddress ();
563 PiSmmCpuSmiEntryFixupAddress ();
564
565 //
566 // Initialize Debug Agent to support source level debug in SMM code
567 //
568 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
569
570 //
571 // Report the start of CPU SMM initialization.
572 //
573 REPORT_STATUS_CODE (
574 EFI_PROGRESS_CODE,
575 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
576 );
577
578 //
579 // Find out SMRR Base and SMRR Size
580 //
581 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
582
583 //
584 // Get MP Services Protocol
585 //
586 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
587 ASSERT_EFI_ERROR (Status);
588
589 //
590 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
591 //
592 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
593 ASSERT_EFI_ERROR (Status);
594 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
595
596 //
597 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
598 // A constant BSP index makes no sense because the BSP may be hot removed.
599 //
600 DEBUG_CODE (
601 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
602
603 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
604 }
605 );
606
607 //
608 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
609 //
610 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
611 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
612
613 //
614 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
615 // Make sure AddressEncMask is confined to the smallest supported address field.
616 //
617 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
618 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
619
620 //
621 // If CPU hot plug is supported, allocate resources for possibly hot-added processors
622 //
623 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
624 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
625 } else {
626 mMaxNumberOfCpus = mNumberOfCpus;
627 }
628 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
629
630 //
631 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
632 // allocated buffer. The minimum size of this buffer for a uniprocessor system
633 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area is
634 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
635 // then the SMI entry point and the CPU save state areas can be tiled to minimize
636 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
637 // by adding the CPU save state size, any extra CPU-specific context, and
638 // the size of code that must be placed at the SMI entry point to transfer
639 // control to a C function in the native SMM execution mode. This size is
640 // rounded up to the nearest power of 2 to give the tile size for each CPU.
641 // The total amount of memory required is the maximum number of CPUs that the
642 // platform supports times the tile size. The picture below shows the tiling,
643 // where m is the number of tiles that fit in 32KB.
644 //
645 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
646 // | CPU m+1 Save State |
647 // +-----------------------------+
648 // | CPU m+1 Extra Data |
649 // +-----------------------------+
650 // | Padding |
651 // +-----------------------------+
652 // | CPU 2m SMI Entry |
653 // +#############################+ <-- Base of allocated buffer + 64 KB
654 // | CPU m-1 Save State |
655 // +-----------------------------+
656 // | CPU m-1 Extra Data |
657 // +-----------------------------+
658 // | Padding |
659 // +-----------------------------+
660 // | CPU 2m-1 SMI Entry |
661 // +=============================+ <-- 2^n offset from Base of allocated buffer
662 // | . . . . . . . . . . . . |
663 // +=============================+ <-- 2^n offset from Base of allocated buffer
664 // | CPU 2 Save State |
665 // +-----------------------------+
666 // | CPU 2 Extra Data |
667 // +-----------------------------+
668 // | Padding |
669 // +-----------------------------+
670 // | CPU m+1 SMI Entry |
671 // +=============================+ <-- Base of allocated buffer + 32 KB
672 // | CPU 1 Save State |
673 // +-----------------------------+
674 // | CPU 1 Extra Data |
675 // +-----------------------------+
676 // | Padding |
677 // +-----------------------------+
678 // | CPU m SMI Entry |
679 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
680 // | CPU 0 Save State |
681 // +-----------------------------+
682 // | CPU 0 Extra Data |
683 // +-----------------------------+
684 // | Padding |
685 // +-----------------------------+
686 // | CPU m-1 SMI Entry |
687 // +=============================+ <-- 2^n offset from Base of allocated buffer
688 // | . . . . . . . . . . . . |
689 // +=============================+ <-- 2^n offset from Base of allocated buffer
690 // | Padding |
691 // +-----------------------------+
692 // | CPU 1 SMI Entry |
693 // +=============================+ <-- 2^n offset from Base of allocated buffer
694 // | Padding |
695 // +-----------------------------+
696 // | CPU 0 SMI Entry |
697 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
698 //
699
700 //
701 // Retrieve CPU Family
702 //
703 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
704 FamilyId = (RegEax >> 8) & 0xf;
705 ModelId = (RegEax >> 4) & 0xf;
706 if (FamilyId == 0x06 || FamilyId == 0x0f) {
707 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
708 }
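//
// For Family 06h and 0Fh the displayed model is (ExtendedModel << 4) + Model per
// the Intel SDM; (RegEax >> 12) & 0xf0 yields CPUID.01h:EAX[19:16] already
// shifted into bit positions [7:4].
//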
709
710 RegEdx = 0;
711 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
712 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
713 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
714 }
715 //
716 // Determine the mode of the CPU at the time an SMI occurs
717 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
718 // Volume 3C, Section 34.4.1.1
719 //
720 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
721 if ((RegEdx & BIT29) != 0) {
722 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
723 }
724 if (FamilyId == 0x06) {
725 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
726 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
727 }
728 }
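//
// BIT29 of CPUID.80000001h:EDX indicates Intel 64 (long mode) support. The
// Family 06h models listed above (0x17, 0x0F, 0x1C) are additionally forced to
// the 64-bit save state map layout regardless of that CPUID report.
//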
729
730 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
731 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
732 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
733 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
734 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
735 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
736 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
737 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
738 if ((RegEcx & CPUID_CET_SS) == 0) {
739 mCetSupported = FALSE;
740 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
741 }
742 if (mCetSupported) {
743 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
744 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
745 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
746 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
747 AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
748 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
749 }
750 } else {
751 mCetSupported = FALSE;
752 PatchInstructionX86(mPatchCetSupported, mCetSupported, 1);
753 }
754 } else {
755 mCetSupported = FALSE;
756 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
757 }
758
759 //
760 // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map, the extra
761 // CPU-specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
762 // This size is rounded up to the nearest power of 2.
763 //
764 TileCodeSize = GetSmiHandlerSize ();
765 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
766 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
767 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
768 TileSize = TileDataSize + TileCodeSize - 1;
769 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
770 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
771
772 //
773 // If the TileSize is larger than space available for the SMI Handler of
774 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
775 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
776 // the SMI Handler size must be reduced or the size of the extra CPU specific
777 // context must be reduced.
778 //
779 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
780
781 //
782 // Allocate buffer for all of the tiles.
783 //
784 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
785 // Volume 3C, Section 34.11 SMBASE Relocation
786 // For Pentium and Intel486 processors, the SMBASE values must be
787 // aligned on a 32-KByte boundary or the processor will enter shutdown
788 // state during the execution of a RSM instruction.
789 //
790 // Intel486 processors: FamilyId is 4
791 // Pentium processors : FamilyId is 5
792 //
793 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
794 if ((FamilyId == 4) || (FamilyId == 5)) {
795 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
796 } else {
797 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
798 }
799 ASSERT (Buffer != NULL);
800 DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
801
802 //
803 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
804 //
805 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
806 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
807
808 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
809 ASSERT (gSmmCpuPrivate->Operation != NULL);
810
811 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
812 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
813
814 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
815 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
816
817 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
818 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
819
820 //
821 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
822 //
823 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
824 ASSERT (mCpuHotPlugData.ApicId != NULL);
825 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
826 ASSERT (mCpuHotPlugData.SmBase != NULL);
827 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
828
829 //
830 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
831 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
832 // size for each CPU in the platform
833 //
834 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
835 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
836 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
837 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
838 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
839
840 if (Index < mNumberOfCpus) {
841 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
842 ASSERT_EFI_ERROR (Status);
843 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
844
845 DEBUG ((DEBUG_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
846 Index,
847 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
848 mCpuHotPlugData.SmBase[Index],
849 gSmmCpuPrivate->CpuSaveState[Index],
850 gSmmCpuPrivate->CpuSaveStateSize[Index]
851 ));
852 } else {
853 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
854 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
855 }
856 }
857
858 //
859 // Allocate SMI stacks for all processors.
860 //
861 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
862 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
863 //
864 // SMM Stack Guard Enabled
865 // 2 more pages are allocated for each processor: one is the guard page and the other is the known good stack.
866 //
867 // +--------------------------------------------------+-----+--------------------------------------------------+
868 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
869 // +--------------------------------------------------+-----+--------------------------------------------------+
870 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
871 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
872 // | | | |
873 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
874 //
875 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
876 }
877
878 mSmmShadowStackSize = 0;
879 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
880 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
881
882 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
883 //
884 // SMM Stack Guard Enabled
885 // Append Shadow Stack after normal stack
886 // 2 more pages are allocated for each processor: one is the guard page and the other is the known good shadow stack.
887 //
888 // |= Stacks
889 // +--------------------------------------------------+---------------------------------------------------------------+
890 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
891 // +--------------------------------------------------+---------------------------------------------------------------+
892 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
893 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
894 // | |
895 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
896 //
897 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
898 } else {
899 //
900 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
901 // Append the Shadow Stack after the normal stack, with 1 more page as the known good shadow stack.
902 // 1 more page is also allocated for each processor as the known good stack.
903 //
904 //
905 // |= Stacks
906 // +-------------------------------------+--------------------------------------------------+
907 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
908 // +-------------------------------------+--------------------------------------------------+
909 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
910 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
911 // | |
912 // |<-------------------------------- Processor N ----------------------------------------->|
913 //
914 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
915 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
916 }
917 }
918
919 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
920 ASSERT (Stacks != NULL);
921 mSmmStackArrayBase = (UINTN)Stacks;
922 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
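//
// Example (hypothetical PCD values): with PcdCpuSmmStackSize = 0x2000 and
// PcdCpuSmmStackGuard enabled, mSmmStackSize = 0x2000 + 2 pages = 0x4000, so
// each processor consumes 0x4000 bytes of this allocation (plus
// mSmmShadowStackSize when CET shadow stacks are enabled).
//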
923
924 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
925 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
926 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
927 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
928 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
929 }
930
931 //
932 // Set SMI stack for SMM base relocation
933 //
934 PatchInstructionX86 (
935 gPatchSmmInitStack,
936 (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),
937 sizeof (UINTN)
938 );
939
940 //
941 // Initialize IDT
942 //
943 InitializeSmmIdt ();
944
945 //
946 // Relocate SMM Base addresses to the ones allocated from SMRAM
947 //
948 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
949 ASSERT (mRebased != NULL);
950 SmmRelocateBases ();
951
952 //
953 // Call hook for BSP to perform extra actions in normal mode after all
954 // SMM base addresses have been relocated on all CPUs
955 //
956 SmmCpuFeaturesSmmRelocationComplete ();
957
958 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
959
960 //
961 // SMM Time initialization
962 //
963 InitializeSmmTimer ();
964
965 //
966 // Initialize MP globals
967 //
968 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
969
970 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
971 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
972 SetShadowStack (
973 Cr3,
974 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
975 mSmmShadowStackSize
976 );
977 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
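//
// Per the layout above, the shadow stack guard page sits one page above the
// start of each CPU's shadow stack region (right after the known good shadow
// stack page), which is the address computed below.
//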
978 SetNotPresentPage (
979 Cr3,
980 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
981 EFI_PAGES_TO_SIZE(1)
982 );
983 }
984 }
985 }
986
987 //
988 // Fill in SMM Reserved Regions
989 //
990 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
991 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
992
993 //
994 // Install the SMM Configuration Protocol onto a new handle on the handle database.
995 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
996 // to an SMRAM address will be present in the handle database
997 //
998 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
999 &gSmmCpuPrivate->SmmCpuHandle,
1000 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
1001 NULL
1002 );
1003 ASSERT_EFI_ERROR (Status);
1004
1005 //
1006 // Install the SMM CPU Protocol into SMM protocol database
1007 //
1008 Status = gSmst->SmmInstallProtocolInterface (
1009 &mSmmCpuHandle,
1010 &gEfiSmmCpuProtocolGuid,
1011 EFI_NATIVE_INTERFACE,
1012 &mSmmCpu
1013 );
1014 ASSERT_EFI_ERROR (Status);
1015
1016 //
1017 // Install the SMM Memory Attribute Protocol into SMM protocol database
1018 //
1019 Status = gSmst->SmmInstallProtocolInterface (
1020 &mSmmCpuHandle,
1021 &gEdkiiSmmMemoryAttributeProtocolGuid,
1022 EFI_NATIVE_INTERFACE,
1023 &mSmmMemoryAttribute
1024 );
1025 ASSERT_EFI_ERROR (Status);
1026
1027 //
1028 // Initialize global buffer for MM MP.
1029 //
1030 InitializeDataForMmMp ();
1031
1032 //
1033 // Install the SMM Mp Protocol into SMM protocol database
1034 //
1035 Status = gSmst->SmmInstallProtocolInterface (
1036 &mSmmCpuHandle,
1037 &gEfiMmMpProtocolGuid,
1038 EFI_NATIVE_INTERFACE,
1039 &mSmmMp
1040 );
1041 ASSERT_EFI_ERROR (Status);
1042
1043 //
1044 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1045 //
1046 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1047 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1048 ASSERT_EFI_ERROR (Status);
1049 }
1050
1051 //
1052 // Initialize SMM CPU Services Support
1053 //
1054 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1055 ASSERT_EFI_ERROR (Status);
1056
1057 //
1058 // register SMM Ready To Lock Protocol notification
1059 //
1060 Status = gSmst->SmmRegisterProtocolNotify (
1061 &gEfiSmmReadyToLockProtocolGuid,
1062 SmmReadyToLockEventNotify,
1063 &Registration
1064 );
1065 ASSERT_EFI_ERROR (Status);
1066
1067 //
1068 // Initialize SMM Profile feature
1069 //
1070 InitSmmProfile (Cr3);
1071
1072 GetAcpiS3EnableFlag ();
1073 InitSmmS3ResumeState (Cr3);
1074
1075 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1076
1077 return EFI_SUCCESS;
1078 }
1079
1080 /**
1081
1082 Find out SMRAM information including SMRR base and SMRR size.
1083
1084 @param SmrrBase SMRR base
1085 @param SmrrSize SMRR size
1086
1087 **/
1088 VOID
1089 FindSmramInfo (
1090 OUT UINT32 *SmrrBase,
1091 OUT UINT32 *SmrrSize
1092 )
1093 {
1094 EFI_STATUS Status;
1095 UINTN Size;
1096 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1097 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1098 UINTN Index;
1099 UINT64 MaxSize;
1100 BOOLEAN Found;
1101
1102 //
1103 // Get SMM Access Protocol
1104 //
1105 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1106 ASSERT_EFI_ERROR (Status);
1107
1108 //
1109 // Get SMRAM information
1110 //
1111 Size = 0;
1112 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1113 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1114
1115 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1116 ASSERT (mSmmCpuSmramRanges != NULL);
1117
1118 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1119 ASSERT_EFI_ERROR (Status);
1120
1121 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1122
1123 //
1124 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1125 //
1126 CurrentSmramRange = NULL;
1127 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1128 //
1129 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1130 //
1131 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1132 continue;
1133 }
1134
1135 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1136 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1137 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1138 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1139 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1140 }
1141 }
1142 }
1143 }
1144
1145 ASSERT (CurrentSmramRange != NULL);
1146
1147 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1148 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1149
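//
// Merge any SMRAM descriptors that are physically adjacent to the selected
// range (immediately below its base or immediately above its end) so that the
// reported SMRR base/size describe one contiguous region; repeat until no
// further ranges can be coalesced.
//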
1150 do {
1151 Found = FALSE;
1152 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1153 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
1154 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
1155 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1156 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1157 Found = TRUE;
1158 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
1159 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1160 Found = TRUE;
1161 }
1162 }
1163 } while (Found);
1164
1165 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1166 }
1167
1168 /**
1169 Configure SMM Code Access Check feature on an AP.
1170 SMM Feature Control MSR will be locked after configuration.
1171
1172 @param[in,out] Buffer Pointer to private data buffer.
1173 **/
1174 VOID
1175 EFIAPI
1176 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1177 IN OUT VOID *Buffer
1178 )
1179 {
1180 UINTN CpuIndex;
1181 UINT64 SmmFeatureControlMsr;
1182 UINT64 NewSmmFeatureControlMsr;
1183
1184 //
1185 // Retrieve the CPU Index from the context passed in
1186 //
1187 CpuIndex = *(UINTN *)Buffer;
1188
1189 //
1190 // Get the current SMM Feature Control MSR value
1191 //
1192 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1193
1194 //
1195 // Compute the new SMM Feature Control MSR value
1196 //
1197 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1198 if (mSmmCodeAccessCheckEnable) {
1199 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1200 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1201 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1202 }
1203 }
1204
1205 //
1206 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1207 //
1208 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1209 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1210 }
1211
1212 //
1213 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1214 //
1215 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1216 }
1217
1218 /**
1219 Configure SMM Code Access Check feature for all processors.
1220 SMM Feature Control MSR will be locked after configuration.
1221 **/
1222 VOID
1223 ConfigSmmCodeAccessCheck (
1224 VOID
1225 )
1226 {
1227 UINTN Index;
1228 EFI_STATUS Status;
1229
1230 //
1231 // Check to see if the Feature Control MSR is supported on this CPU
1232 //
1233 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1234 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1235 mSmmCodeAccessCheckEnable = FALSE;
1236 return;
1237 }
1238
1239 //
1240 // Check to see if the CPU supports the SMM Code Access Check feature
1241 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1242 //
1243 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1244 mSmmCodeAccessCheckEnable = FALSE;
1245 return;
1246 }
1247
1248 //
1249 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1250 //
1251 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1252
1253 //
1254 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1255 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1256 //
1257 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1258
1259 //
1260 // Enable SMM Code Access Check feature on the BSP.
1261 //
1262 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1263
1264 //
1265 // Enable SMM Code Access Check feature for the APs.
1266 //
1267 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1268 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1269 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1270 //
1271 // If this processor does not exist
1272 //
1273 continue;
1274 }
1275 //
1276 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1277 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1278 //
1279 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1280
1281 //
1282 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1283 //
1284 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1285 ASSERT_EFI_ERROR (Status);
1286
1287 //
1288 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1289 //
1290 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1291 CpuPause ();
1292 }
1293
1294 //
1295 // Release the Config SMM Code Access Check spin lock.
1296 //
1297 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1298 }
1299 }
1300 }
1301
1302 /**
1303 This API provides a way to allocate memory for page tables.
1304
1305 This API can be called more than once to allocate memory for page tables.
1306
1307 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1308 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1309 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1310 returned.
1311
1312 @param Pages The number of 4 KB pages to allocate.
1313
1314 @return A pointer to the allocated buffer or NULL if allocation fails.
1315
1316 **/
1317 VOID *
1318 AllocatePageTableMemory (
1319 IN UINTN Pages
1320 )
1321 {
1322 VOID *Buffer;
1323
1324 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1325 if (Buffer != NULL) {
1326 return Buffer;
1327 }
1328 return AllocatePages (Pages);
1329 }
1330
1331 /**
1332 Allocate pages for code.
1333
1334 @param[in] Pages Number of pages to be allocated.
1335
1336 @return Allocated memory.
1337 **/
1338 VOID *
1339 AllocateCodePages (
1340 IN UINTN Pages
1341 )
1342 {
1343 EFI_STATUS Status;
1344 EFI_PHYSICAL_ADDRESS Memory;
1345
1346 if (Pages == 0) {
1347 return NULL;
1348 }
1349
1350 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1351 if (EFI_ERROR (Status)) {
1352 return NULL;
1353 }
1354 return (VOID *) (UINTN) Memory;
1355 }
1356
1357 /**
1358 Allocate aligned pages for code.
1359
1360 @param[in] Pages Number of pages to be allocated.
1361 @param[in] Alignment The requested alignment of the allocation.
1362 Must be a power of two.
1363 If Alignment is zero, then byte alignment is used.
1364
1365 @return Allocated memory.
1366 **/
1367 VOID *
1368 AllocateAlignedCodePages (
1369 IN UINTN Pages,
1370 IN UINTN Alignment
1371 )
1372 {
1373 EFI_STATUS Status;
1374 EFI_PHYSICAL_ADDRESS Memory;
1375 UINTN AlignedMemory;
1376 UINTN AlignmentMask;
1377 UINTN UnalignedPages;
1378 UINTN RealPages;
1379
1380 //
1381 // Alignment must be a power of two or zero.
1382 //
1383 ASSERT ((Alignment & (Alignment - 1)) == 0);
1384
1385 if (Pages == 0) {
1386 return NULL;
1387 }
1388 if (Alignment > EFI_PAGE_SIZE) {
1389 //
1390 // Calculate the total number of pages since alignment is larger than page size.
1391 //
1392 AlignmentMask = Alignment - 1;
1393 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1394 //
1395 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1396 //
1397 ASSERT (RealPages > Pages);
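//
// Example (hypothetical request): Pages = 1 and Alignment = SIZE_32KB gives
// RealPages = 1 + 8 = 9; after rounding Memory up to the 32 KB boundary, the
// unaligned head pages and the tail pages beyond the single requested page
// are freed back below.
//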
1398
1399 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1400 if (EFI_ERROR (Status)) {
1401 return NULL;
1402 }
1403 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1404 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1405 if (UnalignedPages > 0) {
1406 //
1407 // Free first unaligned page(s).
1408 //
1409 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1410 ASSERT_EFI_ERROR (Status);
1411 }
1412 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1413 UnalignedPages = RealPages - Pages - UnalignedPages;
1414 if (UnalignedPages > 0) {
1415 //
1416 // Free last unaligned page(s).
1417 //
1418 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1419 ASSERT_EFI_ERROR (Status);
1420 }
1421 } else {
1422 //
1423 // Do not over-allocate pages in this case.
1424 //
1425 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1426 if (EFI_ERROR (Status)) {
1427 return NULL;
1428 }
1429 AlignedMemory = (UINTN) Memory;
1430 }
1431 return (VOID *) AlignedMemory;
1432 }
1433
1434 /**
1435 Perform the remaining tasks.
1436
1437 **/
1438 VOID
1439 PerformRemainingTasks (
1440 VOID
1441 )
1442 {
1443 if (mSmmReadyToLock) {
1444 //
1445 // Start SMM Profile feature
1446 //
1447 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1448 SmmProfileStart ();
1449 }
1450 //
1451 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
1452 //
1453 InitPaging ();
1454
1455 //
1456 // Mark critical region to be read-only in page table
1457 //
1458 SetMemMapAttributes ();
1459
1460 if (IsRestrictedMemoryAccess ()) {
1461 //
1462 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1463 //
1464 SetUefiMemMapAttributes ();
1465
1466 //
1467 // Set page table itself to be read-only
1468 //
1469 SetPageTableAttributes ();
1470 }
1471
1472 //
1473 // Configure SMM Code Access Check feature if available.
1474 //
1475 ConfigSmmCodeAccessCheck ();
1476
1477 SmmCpuFeaturesCompleteSmmReadyToLock ();
1478
1479 //
1480 // Clean SMM ready to lock flag
1481 //
1482 mSmmReadyToLock = FALSE;
1483 }
1484 }
1485
1486 /**
1487 Perform the pre-tasks.
1488
1489 **/
1490 VOID
1491 PerformPreTasks (
1492 VOID
1493 )
1494 {
1495 RestoreSmmConfigurationInS3 ();
1496 }