1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // SMM CPU Private Data structure that contains SMM Configuration Protocol
15 // along with its supporting fields.
16 //
17 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
18 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
19 NULL, // SmmCpuHandle
20 NULL, // Pointer to ProcessorInfo array
21 NULL, // Pointer to Operation array
22 NULL, // Pointer to CpuSaveStateSize array
23 NULL, // Pointer to CpuSaveState array
24 {
25 { 0 }
26 }, // SmmReservedSmramRegion
27 {
28 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
29 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
30 0, // SmmCoreEntryContext.NumberOfCpus
31 NULL, // SmmCoreEntryContext.CpuSaveStateSize
32 NULL // SmmCoreEntryContext.CpuSaveState
33 },
34 NULL, // SmmCoreEntry
35 {
36 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
37 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
38 },
39 NULL, // Pointer to Ap Wrapper Func array
40 { NULL, NULL }, // List_Entry for Tokens.
41 };
42
43 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
45 0, // Array Length of SmBase and APIC ID
46 NULL, // Pointer to APIC ID array
47 NULL, // Pointer to SMBASE array
48 0, // Reserved
49 0, // SmrrBase
50 0 // SmrrSize
51 };
52
53 //
54 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
55 //
56 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
57
58 //
59 // SMM Relocation variables
60 //
61 volatile BOOLEAN *mRebased;
62 volatile BOOLEAN mIsBsp;
63
64 ///
65 /// Handle for the SMM CPU Protocol
66 ///
67 EFI_HANDLE mSmmCpuHandle = NULL;
68
69 ///
70 /// SMM CPU Protocol instance
71 ///
72 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
73 SmmReadSaveState,
74 SmmWriteSaveState
75 };
76
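///
/// Illustrative sketch (not part of this driver): another SMM driver that has
/// located EFI_SMM_CPU_PROTOCOL could read a register from a CPU's save state
/// roughly as follows; SmmCpu, Status and CpuIndex are hypothetical names.
///
///   UINT64  Rax;
///   Status = SmmCpu->ReadSaveState (
///                      SmmCpu,
///                      sizeof (UINT64),
///                      EFI_SMM_SAVE_STATE_REGISTER_RAX,
///                      CpuIndex,
///                      &Rax
///                      );
///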
77 ///
78 /// SMM Memory Attribute Protocol instance
79 ///
80 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
81 EdkiiSmmGetMemoryAttributes,
82 EdkiiSmmSetMemoryAttributes,
83 EdkiiSmmClearMemoryAttributes
84 };
85
86 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
87
88 //
89 // SMM stack information
90 //
91 UINTN mSmmStackArrayBase;
92 UINTN mSmmStackArrayEnd;
93 UINTN mSmmStackSize;
94
95 UINTN mSmmShadowStackSize;
96 BOOLEAN mCetSupported = TRUE;
97
98 UINTN mMaxNumberOfCpus = 1;
99 UINTN mNumberOfCpus = 1;
100
101 //
102 // SMM ready to lock flag
103 //
104 BOOLEAN mSmmReadyToLock = FALSE;
105
106 //
107 // Global used to cache PCD for SMM Code Access Check enable
108 //
109 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
110
111 //
112 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
113 //
114 UINT64 mAddressEncMask = 0;
115
116 //
117 // Spin lock used to serialize setting of SMM Code Access Check feature
118 //
119 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
120
121 //
122 // Saved SMM ranges information
123 //
124 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
125 UINTN mSmmCpuSmramRangeCount;
126
127 UINT8 mPhysicalAddressBits;
128
129 //
130 // Control register contents saved for SMM S3 resume state initialization.
131 //
132 UINT32 mSmmCr0;
133 UINT32 mSmmCr4;
134
135 /**
136 Initialize IDT to setup exception handlers for SMM.
137
138 **/
139 VOID
140 InitializeSmmIdt (
141 VOID
142 )
143 {
144 EFI_STATUS Status;
145 BOOLEAN InterruptState;
146 IA32_DESCRIPTOR DxeIdtr;
147
148 //
149 // There are 32 (not 256) entries in it since only processor-
150 // generated exceptions will be handled.
151 //
152 gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
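//
// Illustrative arithmetic only: IA32_IDT_GATE_DESCRIPTOR is 8 bytes in IA32
// builds and 16 bytes in X64 builds, so the 32-entry table body is 256 or
// 512 bytes and EFI_SIZE_TO_PAGES () below allocates a single page.
//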
153 //
154 // Allocate page aligned IDT, because it might be set as read only.
155 //
156 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
157 ASSERT (gcSmiIdtr.Base != 0);
158 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
159
160 //
161 // Disable Interrupt and save DXE IDT table
162 //
163 InterruptState = SaveAndDisableInterrupts ();
164 AsmReadIdtr (&DxeIdtr);
165 //
166 // Load SMM temporary IDT table
167 //
168 AsmWriteIdtr (&gcSmiIdtr);
169 //
170 // Set up the default SMM exception handlers; the SMM IDT table
171 // will be updated and saved in gcSmiIdtr
172 //
173 Status = InitializeCpuExceptionHandlers (NULL);
174 ASSERT_EFI_ERROR (Status);
175 //
176 // Restore DXE IDT table and CPU interrupt
177 //
178 AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
179 SetInterruptState (InterruptState);
180 }
181
182 /**
183 Look up the module that contains the input instruction pointer and print its name.
184
185 @param CallerIpAddress Caller instruction pointer.
186
187 **/
188 VOID
189 DumpModuleInfoByIp (
190 IN UINTN CallerIpAddress
191 )
192 {
193 UINTN Pe32Data;
194 VOID *PdbPointer;
195
196 //
197 // Find Image Base
198 //
199 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
200 if (Pe32Data != 0) {
201 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
202 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
203 if (PdbPointer != NULL) {
204 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
205 }
206 }
207 }
208
209 /**
210 Read information from the CPU save state.
211
212 @param This EFI_SMM_CPU_PROTOCOL instance
213 @param Width The number of bytes to read from the CPU save state.
214 @param Register Specifies the CPU register to read from the save state.
215 @param CpuIndex Specifies the zero-based index of the CPU save state.
216 @param Buffer Upon return, this holds the CPU register value read from the save state.
217
218 @retval EFI_SUCCESS The register was read from Save State
219 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
220 @retval EFI_INVALID_PARAMETER Buffer is NULL or CpuIndex is out of range.
221
222 **/
223 EFI_STATUS
224 EFIAPI
225 SmmReadSaveState (
226 IN CONST EFI_SMM_CPU_PROTOCOL *This,
227 IN UINTN Width,
228 IN EFI_SMM_SAVE_STATE_REGISTER Register,
229 IN UINTN CpuIndex,
230 OUT VOID *Buffer
231 )
232 {
233 EFI_STATUS Status;
234
235 //
236 // Retrieve pointer to the specified CPU's SMM Save State buffer
237 //
238 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
239 return EFI_INVALID_PARAMETER;
240 }
241
242 //
243 // The SpeculationBarrier() call here is to ensure the above check for the
244 // CpuIndex has been completed before the execution of subsequent codes.
245 //
246 SpeculationBarrier ();
247
248 //
249 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
250 //
251 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
252 //
253 // The pseudo-register only supports the 64-bit size specified by Width.
254 //
255 if (Width != sizeof (UINT64)) {
256 return EFI_INVALID_PARAMETER;
257 }
258
259 //
260 // If the processor is in SMM at the time the SMI occurred,
261 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
262 // Otherwise, EFI_NOT_FOUND is returned.
263 //
264 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
265 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
266 return EFI_SUCCESS;
267 } else {
268 return EFI_NOT_FOUND;
269 }
270 }
271
272 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
273 return EFI_INVALID_PARAMETER;
274 }
275
276 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
277 if (Status == EFI_UNSUPPORTED) {
278 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
279 }
280
281 return Status;
282 }
283
284 /**
285 Write data to the CPU save state.
286
287 @param This EFI_SMM_CPU_PROTOCOL instance
288 @param Width The number of bytes to write to the CPU save state.
289 @param Register Specifies the CPU register to write to the save state.
290 @param CpuIndex Specifies the zero-based index of the CPU save state
291 @param Buffer Upon entry, this holds the new CPU register value.
292
293 @retval EFI_SUCCESS The register was written to the save state.
294 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
295 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
296
297 **/
298 EFI_STATUS
299 EFIAPI
300 SmmWriteSaveState (
301 IN CONST EFI_SMM_CPU_PROTOCOL *This,
302 IN UINTN Width,
303 IN EFI_SMM_SAVE_STATE_REGISTER Register,
304 IN UINTN CpuIndex,
305 IN CONST VOID *Buffer
306 )
307 {
308 EFI_STATUS Status;
309
310 //
311 // Retrieve pointer to the specified CPU's SMM Save State buffer
312 //
313 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
314 return EFI_INVALID_PARAMETER;
315 }
316
317 //
318 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
319 //
320 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
321 return EFI_SUCCESS;
322 }
323
324 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
325 return EFI_INVALID_PARAMETER;
326 }
327
328 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
329 if (Status == EFI_UNSUPPORTED) {
330 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
331 }
332
333 return Status;
334 }
335
336 /**
337 C function for the SMI handler, used to change each processor's SMBASE register.
338
339 **/
340 VOID
341 EFIAPI
342 SmmInitHandler (
343 VOID
344 )
345 {
346 UINT32 ApicId;
347 UINTN Index;
348
349 //
350 // Update SMM IDT entries' code segment and load IDT
351 //
352 AsmWriteIdtr (&gcSmiIdtr);
353 ApicId = GetApicId ();
354
355 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
356
357 for (Index = 0; Index < mNumberOfCpus; Index++) {
358 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
359 //
360 // Initialize SMM specific features on the currently executing CPU
361 //
362 SmmCpuFeaturesInitializeProcessor (
363 Index,
364 mIsBsp,
365 gSmmCpuPrivate->ProcessorInfo,
366 &mCpuHotPlugData
367 );
368
369 if (!mSmmS3Flag) {
370 //
371 // Check XD and BTS features on each processor on normal boot
372 //
373 CheckFeatureSupported ();
374 }
375
376 if (mIsBsp) {
377 //
378 // BSP rebase is already done above.
379 // Initialize private data during S3 resume
380 //
381 InitializeMpSyncData ();
382 }
383
384 //
385 // Hook return after RSM to set SMM re-based flag
386 //
387 SemaphoreHook (Index, &mRebased[Index]);
388
389 return;
390 }
391 }
392
393 ASSERT (FALSE);
394 }
395
396 /**
397 Relocate the SMBASE of each processor.
398
399 Executed on the first boot and on all S3 resumes.
400
401 **/
402 VOID
403 EFIAPI
404 SmmRelocateBases (
405 VOID
406 )
407 {
408 UINT8 BakBuf[BACK_BUF_SIZE];
409 SMRAM_SAVE_STATE_MAP BakBuf2;
410 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
411 UINT8 *U8Ptr;
412 UINT32 ApicId;
413 UINTN Index;
414 UINTN BspIndex;
415
416 //
417 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
418 //
419 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
420
421 //
422 // Patch ASM code template with current CR0, CR3, and CR4 values
423 //
424 mSmmCr0 = (UINT32)AsmReadCr0 ();
425 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
426 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
427 mSmmCr4 = (UINT32)AsmReadCr4 ();
428 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
429
430 //
431 // Patch GDTR for SMM base relocation
432 //
433 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
434 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
435
436 U8Ptr = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
437 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
438
439 //
440 // Backup original contents at address 0x38000
441 //
442 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
443 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
444
445 //
446 // Load image for relocation
447 //
448 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
449
450 //
451 // Retrieve the local APIC ID of current processor
452 //
453 ApicId = GetApicId ();
454
455 //
456 // Relocate SM bases for all APs
457 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
458 //
459 mIsBsp = FALSE;
460 BspIndex = (UINTN)-1;
461 for (Index = 0; Index < mNumberOfCpus; Index++) {
462 mRebased[Index] = FALSE;
463 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
464 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
465 //
466 // Wait for this AP to finish its 1st SMI
467 //
468 while (!mRebased[Index]) {
469 }
470 } else {
471 //
472 // The BSP will be relocated later
473 //
474 BspIndex = Index;
475 }
476 }
477
478 //
479 // Relocate BSP's SMM base
480 //
481 ASSERT (BspIndex != (UINTN)-1);
482 mIsBsp = TRUE;
483 SendSmiIpi (ApicId);
484 //
485 // Wait for the BSP to finish its 1st SMI
486 //
487 while (!mRebased[BspIndex]) {
488 }
489
490 //
491 // Restore contents at address 0x38000
492 //
493 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
494 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
495 }
496
497 /**
498 SMM Ready To Lock event notification handler.
499
500 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
501 perform additional lock actions that must be performed from SMM on the next SMI.
502
503 @param[in] Protocol Points to the protocol's unique identifier.
504 @param[in] Interface Points to the interface instance.
505 @param[in] Handle The handle on which the interface was installed.
506
507 @retval EFI_SUCCESS Notification handler runs successfully.
508 **/
509 EFI_STATUS
510 EFIAPI
511 SmmReadyToLockEventNotify (
512 IN CONST EFI_GUID *Protocol,
513 IN VOID *Interface,
514 IN EFI_HANDLE Handle
515 )
516 {
517 GetAcpiCpuData ();
518
519 //
520 // Cache a copy of UEFI memory map before we start profiling feature.
521 //
522 GetUefiMemoryMap ();
523
524 //
525 // Set SMM ready to lock flag and return
526 //
527 mSmmReadyToLock = TRUE;
528 return EFI_SUCCESS;
529 }
530
531 /**
532 The module Entry Point of the CPU SMM driver.
533
534 @param ImageHandle The firmware allocated handle for the EFI image.
535 @param SystemTable A pointer to the EFI System Table.
536
537 @retval EFI_SUCCESS The entry point is executed successfully.
538 @retval Other Some error occurs when executing this entry point.
539
540 **/
541 EFI_STATUS
542 EFIAPI
543 PiCpuSmmEntry (
544 IN EFI_HANDLE ImageHandle,
545 IN EFI_SYSTEM_TABLE *SystemTable
546 )
547 {
548 EFI_STATUS Status;
549 EFI_MP_SERVICES_PROTOCOL *MpServices;
550 UINTN NumberOfEnabledProcessors;
551 UINTN Index;
552 VOID *Buffer;
553 UINTN BufferPages;
554 UINTN TileCodeSize;
555 UINTN TileDataSize;
556 UINTN TileSize;
557 UINT8 *Stacks;
558 VOID *Registration;
559 UINT32 RegEax;
560 UINT32 RegEbx;
561 UINT32 RegEcx;
562 UINT32 RegEdx;
563 UINTN FamilyId;
564 UINTN ModelId;
565 UINT32 Cr3;
566
567 //
568 // Initialize address fixup
569 //
570 PiSmmCpuSmmInitFixupAddress ();
571 PiSmmCpuSmiEntryFixupAddress ();
572
573 //
574 // Initialize Debug Agent to support source level debug in SMM code
575 //
576 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
577
578 //
579 // Report the start of CPU SMM initialization.
580 //
581 REPORT_STATUS_CODE (
582 EFI_PROGRESS_CODE,
583 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
584 );
585
586 //
587 // Find out SMRR Base and SMRR Size
588 //
589 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
590
591 //
592 // Get MP Services Protocol
593 //
594 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
595 ASSERT_EFI_ERROR (Status);
596
597 //
598 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
599 //
600 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
601 ASSERT_EFI_ERROR (Status);
602 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
603
604 //
605 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
606 // A constant BSP index makes no sense because the BSP may be hot removed.
607 //
608 DEBUG_CODE_BEGIN ();
609 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
610 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
611 }
612
613 DEBUG_CODE_END ();
614
615 //
616 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
617 //
618 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
619 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
620
621 //
622 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
623 // Make sure AddressEncMask is contained to smallest supported address field.
624 //
625 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
626 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
627
628 //
629 // If CPU hot plug is supported, allocate resources for processors that may be hot added
630 //
631 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
632 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
633 } else {
634 mMaxNumberOfCpus = mNumberOfCpus;
635 }
636
637 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
638
639 //
640 // The CPU save state and the code for the SMI entry point are tiled within an
641 // SMRAM allocated buffer. The minimum size of this buffer for a uniprocessor
642 // system is 32 KB, because the entry point is at SMBASE + 32KB and the CPU save
643 // state area ends just below SMBASE + 64KB. If more than one CPU is present in
644 // the platform, then the SMI entry point and the CPU save state areas can be
645 // tiled to minimize the total amount of SMRAM required for all the CPUs. The
646 // tile size can be computed by adding the CPU save state size, any extra CPU
647 // specific context, and the size of code that must be placed at the SMI entry
648 // point to transfer control to a C function in the native SMM execution mode.
649 // This size is rounded up to the nearest power of 2 to give the tile size for
650 // each CPU. The total amount of memory required is the maximum number of CPUs
651 // the platform supports times the tile size. The picture below shows the
652 // tiling, where m is the number of tiles that fit in 32KB.
653 //
654 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
655 // | CPU m+1 Save State |
656 // +-----------------------------+
657 // | CPU m+1 Extra Data |
658 // +-----------------------------+
659 // | Padding |
660 // +-----------------------------+
661 // | CPU 2m SMI Entry |
662 // +#############################+ <-- Base of allocated buffer + 64 KB
663 // | CPU m-1 Save State |
664 // +-----------------------------+
665 // | CPU m-1 Extra Data |
666 // +-----------------------------+
667 // | Padding |
668 // +-----------------------------+
669 // | CPU 2m-1 SMI Entry |
670 // +=============================+ <-- 2^n offset from Base of allocated buffer
671 // | . . . . . . . . . . . . |
672 // +=============================+ <-- 2^n offset from Base of allocated buffer
673 // | CPU 2 Save State |
674 // +-----------------------------+
675 // | CPU 2 Extra Data |
676 // +-----------------------------+
677 // | Padding |
678 // +-----------------------------+
679 // | CPU m+1 SMI Entry |
680 // +=============================+ <-- Base of allocated buffer + 32 KB
681 // | CPU 1 Save State |
682 // +-----------------------------+
683 // | CPU 1 Extra Data |
684 // +-----------------------------+
685 // | Padding |
686 // +-----------------------------+
687 // | CPU m SMI Entry |
688 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
689 // | CPU 0 Save State |
690 // +-----------------------------+
691 // | CPU 0 Extra Data |
692 // +-----------------------------+
693 // | Padding |
694 // +-----------------------------+
695 // | CPU m-1 SMI Entry |
696 // +=============================+ <-- 2^n offset from Base of allocated buffer
697 // | . . . . . . . . . . . . |
698 // +=============================+ <-- 2^n offset from Base of allocated buffer
699 // | Padding |
700 // +-----------------------------+
701 // | CPU 1 SMI Entry |
702 // +=============================+ <-- 2^n offset from Base of allocated buffer
703 // | Padding |
704 // +-----------------------------+
705 // | CPU 0 SMI Entry |
706 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
707 //
708
709 //
710 // Retrieve CPU Family
711 //
712 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
713 FamilyId = (RegEax >> 8) & 0xf;
714 ModelId = (RegEax >> 4) & 0xf;
715 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
716 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
717 }
718
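//
// Worked example (illustrative only): with CPUID.1:EAX = 0x000306A9,
// FamilyId = (0x000306A9 >> 8) & 0xF = 0x6 and the low model nibble is 0xA;
// since FamilyId is 0x06, the extended model bits (EAX >> 12) & 0xF0 = 0x30
// are OR'ed in above, giving ModelId = 0x3A.
//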
719 RegEdx = 0;
720 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
721 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
722 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
723 }
724
725 //
726 // Determine the mode of the CPU at the time an SMI occurs
727 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
728 // Volume 3C, Section 34.4.1.1
729 //
730 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
731 if ((RegEdx & BIT29) != 0) {
732 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
733 }
734
735 if (FamilyId == 0x06) {
736 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
737 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
738 }
739 }
740
741 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
742 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
743 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
744 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
745 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
746 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
747 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
748 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
749 if ((RegEcx & CPUID_CET_SS) == 0) {
750 mCetSupported = FALSE;
751 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
752 }
753
754 if (mCetSupported) {
755 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
756 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
757 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
758 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
759 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
760 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
761 }
762 } else {
763 mCetSupported = FALSE;
764 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
765 }
766 } else {
767 mCetSupported = FALSE;
768 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
769 }
770
771 //
772 // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map, the extra
773 // CPU specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
774 // This size is rounded up to the nearest power of 2.
775 //
776 TileCodeSize = GetSmiHandlerSize ();
777 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
778 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
779 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
780 TileSize = TileDataSize + TileCodeSize - 1;
781 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
782 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
783
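//
// Worked example (illustrative values only): if TileCodeSize rounds up to
// 0x1000 and TileDataSize rounds up to 0x2000, then TileSize is first
// 0x2FFF, GetPowerOfTwo32 () returns 0x2000, and the final TileSize is
// 2 * 0x2000 = 0x4000 (16 KB) per CPU.
//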
784 //
785 // If the TileSize is larger than space available for the SMI Handler of
786 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
787 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
788 // the SMI Handler size must be reduced or the size of the extra CPU specific
789 // context must be reduced.
790 //
791 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
792
793 //
794 // Allocate buffer for all of the tiles.
795 //
796 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
797 // Volume 3C, Section 34.11 SMBASE Relocation
798 // For Pentium and Intel486 processors, the SMBASE values must be
799 // aligned on a 32-KByte boundary or the processor will enter shutdown
800 // state during the execution of a RSM instruction.
801 //
802 // Intel486 processors: FamilyId is 4
803 // Pentium processors : FamilyId is 5
804 //
805 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
806 if ((FamilyId == 4) || (FamilyId == 5)) {
807 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
808 } else {
809 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
810 }
811
812 ASSERT (Buffer != NULL);
813 DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));
814
815 //
816 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
817 //
818 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
819 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
820
821 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
822 ASSERT (gSmmCpuPrivate->Operation != NULL);
823
824 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
825 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
826
827 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
828 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
829
830 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
831 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
832
833 //
834 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
835 //
836 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
837 ASSERT (mCpuHotPlugData.ApicId != NULL);
838 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
839 ASSERT (mCpuHotPlugData.SmBase != NULL);
840 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
841
842 //
843 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
844 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
845 // size for each CPU in the platform
846 //
847 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
848 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
849 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
850 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
851 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
852
853 if (Index < mNumberOfCpus) {
854 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
855 ASSERT_EFI_ERROR (Status);
856 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
857
858 DEBUG ((
859 DEBUG_INFO,
860 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
861 Index,
862 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
863 mCpuHotPlugData.SmBase[Index],
864 gSmmCpuPrivate->CpuSaveState[Index],
865 gSmmCpuPrivate->CpuSaveStateSize[Index]
866 ));
867 } else {
868 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
869 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
870 }
871 }
872
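//
// Note on the SmBase arithmetic above (illustrative): because SmBase[Index]
// is set to Buffer + Index * TileSize - SMM_HANDLER_OFFSET, the SMI entry
// point of CPU[Index] (SMBASE + SMM_HANDLER_OFFSET, i.e. SMBASE + 32 KB)
// lands exactly at Buffer + Index * TileSize inside the allocated buffer.
//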
873 //
874 // Allocate SMI stacks for all processors.
875 //
876 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
877 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
878 //
879 // SMM Stack Guard Enabled
880 // Two more pages are allocated for each processor: one is the guard page and the other is the known good stack.
881 //
882 // +--------------------------------------------------+-----+--------------------------------------------------+
883 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
884 // +--------------------------------------------------+-----+--------------------------------------------------+
885 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
886 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
887 // | | | |
888 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
889 //
890 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
891 }
892
893 mSmmShadowStackSize = 0;
894 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
895 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
896
897 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
898 //
899 // SMM Stack Guard Enabled
900 // Append Shadow Stack after normal stack
901 // Two more pages are allocated for each processor: one is the guard page and the other is the known good shadow stack.
902 //
903 // |= Stacks
904 // +--------------------------------------------------+---------------------------------------------------------------+
905 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
906 // +--------------------------------------------------+---------------------------------------------------------------+
907 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
908 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
909 // | |
910 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
911 //
912 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
913 } else {
914 //
915 // SMM Stack Guard Disabled (the known good stack is still required for a potential stack switch.)
916 // Append the shadow stack after the normal stack, with one more page as the known good shadow stack.
917 // One more page is also allocated for each processor as the known good stack.
918 //
919 //
920 // |= Stacks
921 // +-------------------------------------+--------------------------------------------------+
922 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
923 // +-------------------------------------+--------------------------------------------------+
924 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
925 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
926 // | |
927 // |<-------------------------------- Processor N ----------------------------------------->|
928 //
929 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
930 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
931 }
932 }
933
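//
// Worked example (illustrative PCD values only): with PcdCpuSmmStackSize =
// 16 KB, PcdCpuSmmShadowStackSize = 8 KB, stack guard enabled and CET
// supported, each processor gets mSmmStackSize = 16 KB + 8 KB = 24 KB and
// mSmmShadowStackSize = 8 KB + 8 KB = 16 KB, i.e. 40 KB per CPU below.
//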
934 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
935 ASSERT (Stacks != NULL);
936 mSmmStackArrayBase = (UINTN)Stacks;
937 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
938
939 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
940 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
941 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
942 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
943 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
944 }
945
946 //
947 // Set SMI stack for SMM base relocation
948 //
949 PatchInstructionX86 (
950 gPatchSmmInitStack,
951 (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
952 sizeof (UINTN)
953 );
954
955 //
956 // Initialize IDT
957 //
958 InitializeSmmIdt ();
959
960 //
961 // Relocate SMM Base addresses to the ones allocated from SMRAM
962 //
963 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
964 ASSERT (mRebased != NULL);
965 SmmRelocateBases ();
966
967 //
968 // Call hook for BSP to perform extra actions in normal mode after all
969 // SMM base addresses have been relocated on all CPUs
970 //
971 SmmCpuFeaturesSmmRelocationComplete ();
972
973 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
974
975 //
976 // SMM Time initialization
977 //
978 InitializeSmmTimer ();
979
980 //
981 // Initialize MP globals
982 //
983 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
984
985 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
986 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
987 SetShadowStack (
988 Cr3,
989 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
990 mSmmShadowStackSize
991 );
992 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
993 SetNotPresentPage (
994 Cr3,
995 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
996 EFI_PAGES_TO_SIZE (1)
997 );
998 }
999 }
1000 }
1001
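//
// Note on the offsets used above (illustrative): for CPU[Index] the shadow
// stack region starts at Stacks + mSmmStackSize + (mSmmStackSize +
// mSmmShadowStackSize) * Index, and when the stack guard is enabled the
// page at offset EFI_PAGES_TO_SIZE (1) into that region (its guard page)
// is marked not present.
//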
1002 //
1003 // Fill in SMM Reserved Regions
1004 //
1005 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1006 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1007
1008 //
1009 // Install the SMM Configuration Protocol onto a new handle on the handle database.
1010 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1011 // to an SMRAM address will be present in the handle database
1012 //
1013 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1014 &gSmmCpuPrivate->SmmCpuHandle,
1015 &gEfiSmmConfigurationProtocolGuid,
1016 &gSmmCpuPrivate->SmmConfiguration,
1017 NULL
1018 );
1019 ASSERT_EFI_ERROR (Status);
1020
1021 //
1022 // Install the SMM CPU Protocol into SMM protocol database
1023 //
1024 Status = gSmst->SmmInstallProtocolInterface (
1025 &mSmmCpuHandle,
1026 &gEfiSmmCpuProtocolGuid,
1027 EFI_NATIVE_INTERFACE,
1028 &mSmmCpu
1029 );
1030 ASSERT_EFI_ERROR (Status);
1031
1032 //
1033 // Install the SMM Memory Attribute Protocol into SMM protocol database
1034 //
1035 Status = gSmst->SmmInstallProtocolInterface (
1036 &mSmmCpuHandle,
1037 &gEdkiiSmmMemoryAttributeProtocolGuid,
1038 EFI_NATIVE_INTERFACE,
1039 &mSmmMemoryAttribute
1040 );
1041 ASSERT_EFI_ERROR (Status);
1042
1043 //
1044 // Initialize global buffer for MM MP.
1045 //
1046 InitializeDataForMmMp ();
1047
1048 //
1049 // Install the SMM Mp Protocol into SMM protocol database
1050 //
1051 Status = gSmst->SmmInstallProtocolInterface (
1052 &mSmmCpuHandle,
1053 &gEfiMmMpProtocolGuid,
1054 EFI_NATIVE_INTERFACE,
1055 &mSmmMp
1056 );
1057 ASSERT_EFI_ERROR (Status);
1058
1059 //
1060 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1061 //
1062 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1063 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1064 ASSERT_EFI_ERROR (Status);
1065 }
1066
1067 //
1068 // Initialize SMM CPU Services Support
1069 //
1070 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1071 ASSERT_EFI_ERROR (Status);
1072
1073 //
1074 // register SMM Ready To Lock Protocol notification
1075 //
1076 Status = gSmst->SmmRegisterProtocolNotify (
1077 &gEfiSmmReadyToLockProtocolGuid,
1078 SmmReadyToLockEventNotify,
1079 &Registration
1080 );
1081 ASSERT_EFI_ERROR (Status);
1082
1083 //
1084 // Initialize SMM Profile feature
1085 //
1086 InitSmmProfile (Cr3);
1087
1088 GetAcpiS3EnableFlag ();
1089 InitSmmS3ResumeState (Cr3);
1090
1091 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1092
1093 return EFI_SUCCESS;
1094 }
1095
1096 /**
1097
1098 Find out SMRAM information including SMRR base and SMRR size.
1099
1100 @param SmrrBase SMRR base
1101 @param SmrrSize SMRR size
1102
1103 **/
1104 VOID
1105 FindSmramInfo (
1106 OUT UINT32 *SmrrBase,
1107 OUT UINT32 *SmrrSize
1108 )
1109 {
1110 EFI_STATUS Status;
1111 UINTN Size;
1112 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1113 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1114 UINTN Index;
1115 UINT64 MaxSize;
1116 BOOLEAN Found;
1117
1118 //
1119 // Get SMM Access Protocol
1120 //
1121 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1122 ASSERT_EFI_ERROR (Status);
1123
1124 //
1125 // Get SMRAM information
1126 //
1127 Size = 0;
1128 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1129 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1130
1131 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1132 ASSERT (mSmmCpuSmramRanges != NULL);
1133
1134 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1135 ASSERT_EFI_ERROR (Status);
1136
1137 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1138
1139 //
1140 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1141 //
1142 CurrentSmramRange = NULL;
1143 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1144 //
1145 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1146 //
1147 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1148 continue;
1149 }
1150
1151 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1152 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1153 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1154 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1155 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1156 }
1157 }
1158 }
1159 }
1160
1161 ASSERT (CurrentSmramRange != NULL);
1162
1163 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1164 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1165
1166 do {
1167 Found = FALSE;
1168 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1169 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
1170 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
1171 {
1172 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1173 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1174 Found = TRUE;
1175 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
1176 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1177 Found = TRUE;
1178 }
1179 }
1180 } while (Found);
1181
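//
// Worked example (illustrative addresses only): if the largest usable range
// is 0x7F000000..0x7F7FFFFF and another descriptor covers
// 0x7F800000..0x7FFFFFFF, the loop above merges them so that SmrrBase
// remains 0x7F000000 and SmrrSize grows to 0x1000000 (16 MB).
//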
1182 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1183 }
1184
1185 /**
1186 Configure SMM Code Access Check feature on an AP.
1187 SMM Feature Control MSR will be locked after configuration.
1188
1189 @param[in,out] Buffer Pointer to private data buffer.
1190 **/
1191 VOID
1192 EFIAPI
1193 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1194 IN OUT VOID *Buffer
1195 )
1196 {
1197 UINTN CpuIndex;
1198 UINT64 SmmFeatureControlMsr;
1199 UINT64 NewSmmFeatureControlMsr;
1200
1201 //
1202 // Retrieve the CPU Index from the context passed in
1203 //
1204 CpuIndex = *(UINTN *)Buffer;
1205
1206 //
1207 // Get the current SMM Feature Control MSR value
1208 //
1209 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1210
1211 //
1212 // Compute the new SMM Feature Control MSR value
1213 //
1214 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1215 if (mSmmCodeAccessCheckEnable) {
1216 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1217 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1218 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1219 }
1220 }
1221
1222 //
1223 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1224 //
1225 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1226 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1227 }
1228
1229 //
1230 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1231 //
1232 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1233 }
1234
1235 /**
1236 Configure SMM Code Access Check feature for all processors.
1237 SMM Feature Control MSR will be locked after configuration.
1238 **/
1239 VOID
1240 ConfigSmmCodeAccessCheck (
1241 VOID
1242 )
1243 {
1244 UINTN Index;
1245 EFI_STATUS Status;
1246
1247 //
1248 // Check to see if the Feature Control MSR is supported on this CPU
1249 //
1250 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1251 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1252 mSmmCodeAccessCheckEnable = FALSE;
1253 return;
1254 }
1255
1256 //
1257 // Check to see if the CPU supports the SMM Code Access Check feature
1258 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1259 //
1260 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1261 mSmmCodeAccessCheckEnable = FALSE;
1262 return;
1263 }
1264
1265 //
1266 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1267 //
1268 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1269
1270 //
1271 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1272 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1273 //
1274 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1275
1276 //
1277 // Enable SMM Code Access Check feature on the BSP.
1278 //
1279 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1280
1281 //
1282 // Enable SMM Code Access Check feature for the APs.
1283 //
1284 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1285 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1286 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1287 //
1288 // If this processor does not exist
1289 //
1290 continue;
1291 }
1292
1293 //
1294 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1295 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1296 //
1297 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1298
1299 //
1300 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1301 //
1302 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1303 ASSERT_EFI_ERROR (Status);
1304
1305 //
1306 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1307 //
1308 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1309 CpuPause ();
1310 }
1311
1312 //
1313 // Release the Config SMM Code Access Check spin lock.
1314 //
1315 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1316 }
1317 }
1318 }
1319
1320 /**
1321 This API provides a way to allocate memory for page table.
1322
1323 This API can be called more than once to allocate memory for page tables.
1324
1325 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1326 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1327 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1328 returned.
1329
1330 @param Pages The number of 4 KB pages to allocate.
1331
1332 @return A pointer to the allocated buffer or NULL if allocation fails.
1333
1334 **/
1335 VOID *
1336 AllocatePageTableMemory (
1337 IN UINTN Pages
1338 )
1339 {
1340 VOID *Buffer;
1341
1342 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1343 if (Buffer != NULL) {
1344 return Buffer;
1345 }
1346
1347 return AllocatePages (Pages);
1348 }
1349
1350 /**
1351 Allocate pages for code.
1352
1353 @param[in] Pages Number of pages to be allocated.
1354
1355 @return Allocated memory.
1356 **/
1357 VOID *
1358 AllocateCodePages (
1359 IN UINTN Pages
1360 )
1361 {
1362 EFI_STATUS Status;
1363 EFI_PHYSICAL_ADDRESS Memory;
1364
1365 if (Pages == 0) {
1366 return NULL;
1367 }
1368
1369 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1370 if (EFI_ERROR (Status)) {
1371 return NULL;
1372 }
1373
1374 return (VOID *)(UINTN)Memory;
1375 }
1376
1377 /**
1378 Allocate aligned pages for code.
1379
1380 @param[in] Pages Number of pages to be allocated.
1381 @param[in] Alignment The requested alignment of the allocation.
1382 Must be a power of two.
1383 If Alignment is zero, then byte alignment is used.
1384
1385 @return Allocated memory.
1386 **/
1387 VOID *
1388 AllocateAlignedCodePages (
1389 IN UINTN Pages,
1390 IN UINTN Alignment
1391 )
1392 {
1393 EFI_STATUS Status;
1394 EFI_PHYSICAL_ADDRESS Memory;
1395 UINTN AlignedMemory;
1396 UINTN AlignmentMask;
1397 UINTN UnalignedPages;
1398 UINTN RealPages;
1399
1400 //
1401 // Alignment must be a power of two or zero.
1402 //
1403 ASSERT ((Alignment & (Alignment - 1)) == 0);
1404
1405 if (Pages == 0) {
1406 return NULL;
1407 }
1408
1409 if (Alignment > EFI_PAGE_SIZE) {
1410 //
1411 // Calculate the total number of pages since alignment is larger than page size.
1412 //
1413 AlignmentMask = Alignment - 1;
1414 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1415 //
1416 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1417 //
1418 ASSERT (RealPages > Pages);
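//
// Worked example (illustrative values only): for Pages = 2 and Alignment =
// SIZE_32KB, RealPages = 2 + 8 = 10 pages are requested; if the allocation
// comes back at 0x7F123000, AlignedMemory becomes 0x7F128000, the 5 leading
// pages and the 3 trailing pages are freed, and the 2 aligned pages remain.
//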
1419
1420 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1421 if (EFI_ERROR (Status)) {
1422 return NULL;
1423 }
1424
1425 AlignedMemory = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
1426 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
1427 if (UnalignedPages > 0) {
1428 //
1429 // Free first unaligned page(s).
1430 //
1431 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1432 ASSERT_EFI_ERROR (Status);
1433 }
1434
1435 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1436 UnalignedPages = RealPages - Pages - UnalignedPages;
1437 if (UnalignedPages > 0) {
1438 //
1439 // Free last unaligned page(s).
1440 //
1441 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1442 ASSERT_EFI_ERROR (Status);
1443 }
1444 } else {
1445 //
1446 // Do not over-allocate pages in this case.
1447 //
1448 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1449 if (EFI_ERROR (Status)) {
1450 return NULL;
1451 }
1452
1453 AlignedMemory = (UINTN)Memory;
1454 }
1455
1456 return (VOID *)AlignedMemory;
1457 }
1458
1459 /**
1460 Perform the remaining tasks.
1461
1462 **/
1463 VOID
1464 PerformRemainingTasks (
1465 VOID
1466 )
1467 {
1468 if (mSmmReadyToLock) {
1469 //
1470 // Start SMM Profile feature
1471 //
1472 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1473 SmmProfileStart ();
1474 }
1475
1476 //
1477 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not present and execute-disable.
1478 //
1479 InitPaging ();
1480
1481 //
1482 // Mark critical region to be read-only in page table
1483 //
1484 SetMemMapAttributes ();
1485
1486 if (IsRestrictedMemoryAccess ()) {
1487 //
1488 // For memory outside SMRAM, only the SMM communication buffer and MMIO are mapped.
1489 //
1490 SetUefiMemMapAttributes ();
1491
1492 //
1493 // Set page table itself to be read-only
1494 //
1495 SetPageTableAttributes ();
1496 }
1497
1498 //
1499 // Configure SMM Code Access Check feature if available.
1500 //
1501 ConfigSmmCodeAccessCheck ();
1502
1503 SmmCpuFeaturesCompleteSmmReadyToLock ();
1504
1505 //
1506 // Clean SMM ready to lock flag
1507 //
1508 mSmmReadyToLock = FALSE;
1509 }
1510 }
1511
1512 /**
1513 Perform the pre-tasks.
1514
1515 **/
1516 VOID
1517 PerformPreTasks (
1518 VOID
1519 )
1520 {
1521 RestoreSmmConfigurationInS3 ();
1522 }