]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg/PiSmmCpu: Add Shadow Stack Support for X86 SMM.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
// Note: SmmConfiguration.SmramReservedRegions points back into this same
// structure (self-reference is valid for objects with static storage
// duration).  The NULL array pointers are allocated in PiCpuSmmEntry().
//
SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};
44
//
// Per-CPU APIC ID / SMBASE tables plus the SMRR range.  The arrays are
// allocated and filled in PiCpuSmmEntry(); the structure is handed to
// SmmCpuFeaturesInitializeProcessor() during SMBASE relocation.
//
CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};
54
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// mRebased: one flag per CPU (allocated in PiCpuSmmEntry); each CPU's first
// SMI sets its flag via SemaphoreHook, and SmmRelocateBases() spins on it.
// mIsBsp: tells SmmInitHandler whether the CPU being relocated is the BSP.
// Both are volatile because they are written from SMI context while normal
// code busy-waits on them.
//
volatile BOOLEAN *mRebased;
volatile BOOLEAN mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// One handler slot per exception vector.
// NOTE(review): not referenced in this chunk — presumably populated via the
// SMM CPU service's RegisterExceptionHandler path; confirm against the rest
// of the module.
//
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
// mSmmStackArrayBase/End bracket the single allocation that holds every
// CPU's SMI stack (and, when CET is enabled, shadow stack); mSmmStackSize
// is the per-CPU stride of the normal stack portion.  All are set in
// PiCpuSmmEntry().
//
UINTN mSmmStackArrayBase;
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

//
// Per-CPU shadow-stack stride (0 when CET shadow stack is not used).
// mCetSupported defaults to TRUE and is cleared in PiCpuSmmEntry() when
// CPUID reports no CET_SS support or the CET PCD mask is zero; the value is
// also patched into the assembly SMI entry code via mPatchCetSupported.
//
UINTN mSmmShadowStackSize;
BOOLEAN mCetSupported = TRUE;

//
// mNumberOfCpus: CPUs present at boot (from MP Services).
// mMaxNumberOfCpus: allocation bound — equals mNumberOfCpus, or the
// PcdCpuMaxLogicalProcessorNumber limit when CPU hot plug is supported.
//
UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64 mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
UINTN mSmmCpuSmramRangeCount;

//
// Width of the physical address space.
// NOTE(review): assigned outside this chunk (likely from CPUID 80000008h in
// the paging setup code) — confirm.
//
UINT8 mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
// (Captured in SmmRelocateBases() from the running CPU's CR0/CR4.)
//
UINT32 mSmmCr0;
UINT32 mSmmCr4;
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned 32-entry IDT, temporarily loads it with
  interrupts disabled so InitializeCpuExceptionHandlers() can populate it,
  then restores the DXE IDT and the previous interrupt state.  The built
  table remains described by gcSmiIdtr for later use in SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS Status;
  BOOLEAN InterruptState;
  IA32_DESCRIPTOR DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}
183
184 /**
185 Search module name by input IP address and output it.
186
187 @param CallerIpAddress Caller instruction pointer.
188
189 **/
190 VOID
191 DumpModuleInfoByIp (
192 IN UINTN CallerIpAddress
193 )
194 {
195 UINTN Pe32Data;
196 VOID *PdbPointer;
197
198 //
199 // Find Image Base
200 //
201 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
202 if (Pe32Data != 0) {
203 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
204 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
205 if (PdbPointer != NULL) {
206 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
207 }
208 }
209 }
210
211 /**
212 Read information from the CPU save state.
213
214 @param This EFI_SMM_CPU_PROTOCOL instance
215 @param Width The number of bytes to read from the CPU save state.
216 @param Register Specifies the CPU register to read form the save state.
217 @param CpuIndex Specifies the zero-based index of the CPU save state.
218 @param Buffer Upon return, this holds the CPU register value read from the save state.
219
220 @retval EFI_SUCCESS The register was read from Save State
221 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
222 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.
223
224 **/
225 EFI_STATUS
226 EFIAPI
227 SmmReadSaveState (
228 IN CONST EFI_SMM_CPU_PROTOCOL *This,
229 IN UINTN Width,
230 IN EFI_SMM_SAVE_STATE_REGISTER Register,
231 IN UINTN CpuIndex,
232 OUT VOID *Buffer
233 )
234 {
235 EFI_STATUS Status;
236
237 //
238 // Retrieve pointer to the specified CPU's SMM Save State buffer
239 //
240 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
241 return EFI_INVALID_PARAMETER;
242 }
243 //
244 // The SpeculationBarrier() call here is to ensure the above check for the
245 // CpuIndex has been completed before the execution of subsequent codes.
246 //
247 SpeculationBarrier ();
248
249 //
250 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
251 //
252 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
253 //
254 // The pseudo-register only supports the 64-bit size specified by Width.
255 //
256 if (Width != sizeof (UINT64)) {
257 return EFI_INVALID_PARAMETER;
258 }
259 //
260 // If the processor is in SMM at the time the SMI occurred,
261 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
262 // Otherwise, EFI_NOT_FOUND is returned.
263 //
264 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
265 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
266 return EFI_SUCCESS;
267 } else {
268 return EFI_NOT_FOUND;
269 }
270 }
271
272 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
273 return EFI_INVALID_PARAMETER;
274 }
275
276 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
277 if (Status == EFI_UNSUPPORTED) {
278 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
279 }
280 return Status;
281 }
282
283 /**
284 Write data to the CPU save state.
285
286 @param This EFI_SMM_CPU_PROTOCOL instance
287 @param Width The number of bytes to read from the CPU save state.
288 @param Register Specifies the CPU register to write to the save state.
289 @param CpuIndex Specifies the zero-based index of the CPU save state
290 @param Buffer Upon entry, this holds the new CPU register value.
291
292 @retval EFI_SUCCESS The register was written from Save State
293 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
294 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct
295
296 **/
297 EFI_STATUS
298 EFIAPI
299 SmmWriteSaveState (
300 IN CONST EFI_SMM_CPU_PROTOCOL *This,
301 IN UINTN Width,
302 IN EFI_SMM_SAVE_STATE_REGISTER Register,
303 IN UINTN CpuIndex,
304 IN CONST VOID *Buffer
305 )
306 {
307 EFI_STATUS Status;
308
309 //
310 // Retrieve pointer to the specified CPU's SMM Save State buffer
311 //
312 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
313 return EFI_INVALID_PARAMETER;
314 }
315
316 //
317 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
318 //
319 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
320 return EFI_SUCCESS;
321 }
322
323 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
324 return EFI_INVALID_PARAMETER;
325 }
326
327 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
328 if (Status == EFI_UNSUPPORTED) {
329 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
330 }
331 return Status;
332 }
333
334
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs on each CPU during its first SMI, entered through the relocation
  stub that SmmRelocateBases() installed at the default SMBASE.  Identifies
  the executing CPU by APIC ID, performs per-processor SMM feature
  initialization, and hooks the RSM return path so this CPU's mRebased[]
  flag is raised once relocation completes.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32 ApicId;
  UINTN Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Find this CPU's slot by matching the local APIC ID against the
  // ProcessorInfo table built in PiCpuSmmEntry().
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // Reaching here means the executing CPU's APIC ID is not in the
  // ProcessorInfo table, which should be impossible.
  //
  ASSERT (FALSE);
}
393
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Patches the SmmInit assembly template with the current control-register
  values, copies it over the default SMI entry point at 0x38000
  (SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET), then sends one SMI to every
  CPU — all APs first, the BSP last — so each runs SmmInitHandler() and
  moves its SMBASE into SMRAM.  The memory clobbered at the default SMBASE
  is backed up beforehand and restored afterwards.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP BakBuf2;
  SMRAM_SAVE_STATE_MAP *CpuStatePtr;
  UINT8 *U8Ptr;
  UINT32 ApicId;
  UINTN Index;
  UINTN BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values.
  // The CR4 value has CR4_CET_ENABLE masked off before it is written into
  // the template.
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      // (mRebased is volatile, so this spin re-reads memory; the flag is
      // set from the AP's SMI context via SemaphoreHook)
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
492
493 /**
494 SMM Ready To Lock event notification handler.
495
496 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
497 perform additional lock actions that must be performed from SMM on the next SMI.
498
499 @param[in] Protocol Points to the protocol's unique identifier.
500 @param[in] Interface Points to the interface instance.
501 @param[in] Handle The handle on which the interface was installed.
502
503 @retval EFI_SUCCESS Notification handler runs successfully.
504 **/
505 EFI_STATUS
506 EFIAPI
507 SmmReadyToLockEventNotify (
508 IN CONST EFI_GUID *Protocol,
509 IN VOID *Interface,
510 IN EFI_HANDLE Handle
511 )
512 {
513 GetAcpiCpuData ();
514
515 //
516 // Cache a copy of UEFI memory map before we start profiling feature.
517 //
518 GetUefiMemoryMap ();
519
520 //
521 // Set SMM ready to lock flag and return
522 //
523 mSmmReadyToLock = TRUE;
524 return EFI_SUCCESS;
525 }
526
527 /**
528 The module Entry Point of the CPU SMM driver.
529
530 @param ImageHandle The firmware allocated handle for the EFI image.
531 @param SystemTable A pointer to the EFI System Table.
532
533 @retval EFI_SUCCESS The entry point is executed successfully.
534 @retval Other Some error occurs when executing this entry point.
535
536 **/
537 EFI_STATUS
538 EFIAPI
539 PiCpuSmmEntry (
540 IN EFI_HANDLE ImageHandle,
541 IN EFI_SYSTEM_TABLE *SystemTable
542 )
543 {
544 EFI_STATUS Status;
545 EFI_MP_SERVICES_PROTOCOL *MpServices;
546 UINTN NumberOfEnabledProcessors;
547 UINTN Index;
548 VOID *Buffer;
549 UINTN BufferPages;
550 UINTN TileCodeSize;
551 UINTN TileDataSize;
552 UINTN TileSize;
553 UINT8 *Stacks;
554 VOID *Registration;
555 UINT32 RegEax;
556 UINT32 RegEbx;
557 UINT32 RegEcx;
558 UINT32 RegEdx;
559 UINTN FamilyId;
560 UINTN ModelId;
561 UINT32 Cr3;
562
563 //
564 // Initialize address fixup
565 //
566 PiSmmCpuSmmInitFixupAddress ();
567 PiSmmCpuSmiEntryFixupAddress ();
568
569 //
570 // Initialize Debug Agent to support source level debug in SMM code
571 //
572 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
573
574 //
575 // Report the start of CPU SMM initialization.
576 //
577 REPORT_STATUS_CODE (
578 EFI_PROGRESS_CODE,
579 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
580 );
581
582 //
583 // Find out SMRR Base and SMRR Size
584 //
585 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
586
587 //
588 // Get MP Services Protocol
589 //
590 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
591 ASSERT_EFI_ERROR (Status);
592
593 //
594 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
595 //
596 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
597 ASSERT_EFI_ERROR (Status);
598 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
599
600 //
601 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
602 // A constant BSP index makes no sense because it may be hot removed.
603 //
604 DEBUG_CODE (
605 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
606
607 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
608 }
609 );
610
611 //
612 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
613 //
614 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
615 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
616
617 //
618 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
619 // Make sure AddressEncMask is contained to smallest supported address field.
620 //
621 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
622 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
623
624 //
625 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
626 //
627 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
628 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
629 } else {
630 mMaxNumberOfCpus = mNumberOfCpus;
631 }
632 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
633
634 //
635 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
636 // allocated buffer. The minimum size of this buffer for a uniprocessor system
637 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
638 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
639 // then the SMI entry point and the CPU save state areas can be tiles to minimize
640 // the total amount SMRAM required for all the CPUs. The tile size can be computed
641 // by adding the // CPU save state size, any extra CPU specific context, and
642 // the size of code that must be placed at the SMI entry point to transfer
643 // control to a C function in the native SMM execution mode. This size is
644 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
645 // The total amount of memory required is the maximum number of CPUs that
646 // platform supports times the tile size. The picture below shows the tiling,
647 // where m is the number of tiles that fit in 32KB.
648 //
649 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
650 // | CPU m+1 Save State |
651 // +-----------------------------+
652 // | CPU m+1 Extra Data |
653 // +-----------------------------+
654 // | Padding |
655 // +-----------------------------+
656 // | CPU 2m SMI Entry |
657 // +#############################+ <-- Base of allocated buffer + 64 KB
658 // | CPU m-1 Save State |
659 // +-----------------------------+
660 // | CPU m-1 Extra Data |
661 // +-----------------------------+
662 // | Padding |
663 // +-----------------------------+
664 // | CPU 2m-1 SMI Entry |
665 // +=============================+ <-- 2^n offset from Base of allocated buffer
666 // | . . . . . . . . . . . . |
667 // +=============================+ <-- 2^n offset from Base of allocated buffer
668 // | CPU 2 Save State |
669 // +-----------------------------+
670 // | CPU 2 Extra Data |
671 // +-----------------------------+
672 // | Padding |
673 // +-----------------------------+
674 // | CPU m+1 SMI Entry |
675 // +=============================+ <-- Base of allocated buffer + 32 KB
676 // | CPU 1 Save State |
677 // +-----------------------------+
678 // | CPU 1 Extra Data |
679 // +-----------------------------+
680 // | Padding |
681 // +-----------------------------+
682 // | CPU m SMI Entry |
683 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
684 // | CPU 0 Save State |
685 // +-----------------------------+
686 // | CPU 0 Extra Data |
687 // +-----------------------------+
688 // | Padding |
689 // +-----------------------------+
690 // | CPU m-1 SMI Entry |
691 // +=============================+ <-- 2^n offset from Base of allocated buffer
692 // | . . . . . . . . . . . . |
693 // +=============================+ <-- 2^n offset from Base of allocated buffer
694 // | Padding |
695 // +-----------------------------+
696 // | CPU 1 SMI Entry |
697 // +=============================+ <-- 2^n offset from Base of allocated buffer
698 // | Padding |
699 // +-----------------------------+
700 // | CPU 0 SMI Entry |
701 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
702 //
703
704 //
705 // Retrieve CPU Family
706 //
707 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
708 FamilyId = (RegEax >> 8) & 0xf;
709 ModelId = (RegEax >> 4) & 0xf;
710 if (FamilyId == 0x06 || FamilyId == 0x0f) {
711 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
712 }
713
714 RegEdx = 0;
715 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
716 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
717 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
718 }
719 //
720 // Determine the mode of the CPU at the time an SMI occurs
721 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
722 // Volume 3C, Section 34.4.1.1
723 //
724 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
725 if ((RegEdx & BIT29) != 0) {
726 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
727 }
728 if (FamilyId == 0x06) {
729 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
730 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
731 }
732 }
733
734 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
735 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
736 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
737 if (RegEax > CPUID_EXTENDED_FUNCTION) {
738 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
739 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
740 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
741 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
742 if ((RegEcx & CPUID_CET_SS) == 0) {
743 mCetSupported = FALSE;
744 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
745 }
746 if (mCetSupported) {
747 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
748 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
749 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
750 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
751 AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
752 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
753 }
754 }
755 } else {
756 mCetSupported = FALSE;
757 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
758 }
759
760 //
761 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
762 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
763 // This size is rounded up to nearest power of 2.
764 //
765 TileCodeSize = GetSmiHandlerSize ();
766 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
767 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
768 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
769 TileSize = TileDataSize + TileCodeSize - 1;
770 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
771 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
772
773 //
774 // If the TileSize is larger than space available for the SMI Handler of
775 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
776 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
777 // the SMI Handler size must be reduced or the size of the extra CPU specific
778 // context must be reduced.
779 //
780 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
781
782 //
783 // Allocate buffer for all of the tiles.
784 //
785 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
786 // Volume 3C, Section 34.11 SMBASE Relocation
787 // For Pentium and Intel486 processors, the SMBASE values must be
788 // aligned on a 32-KByte boundary or the processor will enter shutdown
789 // state during the execution of a RSM instruction.
790 //
791 // Intel486 processors: FamilyId is 4
792 // Pentium processors : FamilyId is 5
793 //
794 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
795 if ((FamilyId == 4) || (FamilyId == 5)) {
796 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
797 } else {
798 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
799 }
800 ASSERT (Buffer != NULL);
801 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
802
803 //
804 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
805 //
806 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
807 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
808
809 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
810 ASSERT (gSmmCpuPrivate->Operation != NULL);
811
812 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
813 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
814
815 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
816 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
817
818 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
819 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
820
821 //
822 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
823 //
824 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
825 ASSERT (mCpuHotPlugData.ApicId != NULL);
826 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
827 ASSERT (mCpuHotPlugData.SmBase != NULL);
828 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
829
830 //
831 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
832 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
833 // size for each CPU in the platform
834 //
835 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
836 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
837 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
838 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
839 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
840
841 if (Index < mNumberOfCpus) {
842 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
843 ASSERT_EFI_ERROR (Status);
844 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
845
846 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
847 Index,
848 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
849 mCpuHotPlugData.SmBase[Index],
850 gSmmCpuPrivate->CpuSaveState[Index],
851 gSmmCpuPrivate->CpuSaveStateSize[Index]
852 ));
853 } else {
854 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
855 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
856 }
857 }
858
859 //
860 // Allocate SMI stacks for all processors.
861 //
862 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
863 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
864 //
865 // 2 more pages is allocated for each processor.
866 // one is guard page and the other is known good stack.
867 //
868 // +-------------------------------------------+-----+-------------------------------------------+
869 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
870 // +-------------------------------------------+-----+-------------------------------------------+
871 // | | | |
872 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
873 //
874 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
875 }
876
877 mSmmShadowStackSize = 0;
878 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
879 //
880 // Append Shadow Stack after normal stack
881 //
882 // |= Stacks
883 // +--------------------------------------------------+---------------------------------------------------------------+
884 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
885 // +--------------------------------------------------+---------------------------------------------------------------+
886 // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|
887 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
888 // | |
889 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
890 //
891 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
892 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
893 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
894 }
895 }
896
897 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
898 ASSERT (Stacks != NULL);
899 mSmmStackArrayBase = (UINTN)Stacks;
900 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
901
902 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
903 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
904 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
905 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
906 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
907 }
908
909 //
910 // Set SMI stack for SMM base relocation
911 //
912 PatchInstructionX86 (
913 gPatchSmmInitStack,
914 (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),
915 sizeof (UINTN)
916 );
917
918 //
919 // Initialize IDT
920 //
921 InitializeSmmIdt ();
922
923 //
924 // Relocate SMM Base addresses to the ones allocated from SMRAM
925 //
926 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
927 ASSERT (mRebased != NULL);
928 SmmRelocateBases ();
929
930 //
931 // Call hook for BSP to perform extra actions in normal mode after all
932 // SMM base addresses have been relocated on all CPUs
933 //
934 SmmCpuFeaturesSmmRelocationComplete ();
935
936 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
937
938 //
939 // SMM Time initialization
940 //
941 InitializeSmmTimer ();
942
943 //
944 // Initialize MP globals
945 //
946 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
947
948 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
949 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
950 SetShadowStack (
951 Cr3,
952 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
953 mSmmShadowStackSize
954 );
955 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
956 SetNotPresentPage (
957 Cr3,
958 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
959 EFI_PAGES_TO_SIZE(1)
960 );
961 }
962 }
963 }
964
965 //
966 // Fill in SMM Reserved Regions
967 //
968 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
969 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
970
971 //
972 // Install the SMM Configuration Protocol onto a new handle on the handle database.
973 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
974 // to an SMRAM address will be present in the handle database
975 //
976 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
977 &gSmmCpuPrivate->SmmCpuHandle,
978 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
979 NULL
980 );
981 ASSERT_EFI_ERROR (Status);
982
983 //
984 // Install the SMM CPU Protocol into SMM protocol database
985 //
986 Status = gSmst->SmmInstallProtocolInterface (
987 &mSmmCpuHandle,
988 &gEfiSmmCpuProtocolGuid,
989 EFI_NATIVE_INTERFACE,
990 &mSmmCpu
991 );
992 ASSERT_EFI_ERROR (Status);
993
994 //
995 // Install the SMM Memory Attribute Protocol into SMM protocol database
996 //
997 Status = gSmst->SmmInstallProtocolInterface (
998 &mSmmCpuHandle,
999 &gEdkiiSmmMemoryAttributeProtocolGuid,
1000 EFI_NATIVE_INTERFACE,
1001 &mSmmMemoryAttribute
1002 );
1003 ASSERT_EFI_ERROR (Status);
1004
1005 //
1006 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1007 //
1008 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1009 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1010 ASSERT_EFI_ERROR (Status);
1011 }
1012
1013 //
1014 // Initialize SMM CPU Services Support
1015 //
1016 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1017 ASSERT_EFI_ERROR (Status);
1018
1019 //
1020 // register SMM Ready To Lock Protocol notification
1021 //
1022 Status = gSmst->SmmRegisterProtocolNotify (
1023 &gEfiSmmReadyToLockProtocolGuid,
1024 SmmReadyToLockEventNotify,
1025 &Registration
1026 );
1027 ASSERT_EFI_ERROR (Status);
1028
1029 //
1030 // Initialize SMM Profile feature
1031 //
1032 InitSmmProfile (Cr3);
1033
1034 GetAcpiS3EnableFlag ();
1035 InitSmmS3ResumeState (Cr3);
1036
1037 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1038
1039 return EFI_SUCCESS;
1040 }
1041
1042 /**
1043
1044 Find out SMRAM information including SMRR base and SMRR size.
1045
1046 @param SmrrBase SMRR base
1047 @param SmrrSize SMRR size
1048
1049 **/
1050 VOID
1051 FindSmramInfo (
1052 OUT UINT32 *SmrrBase,
1053 OUT UINT32 *SmrrSize
1054 )
1055 {
1056 EFI_STATUS Status;
1057 UINTN Size;
1058 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1059 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1060 UINTN Index;
1061 UINT64 MaxSize;
1062 BOOLEAN Found;
1063
1064 //
1065 // Get SMM Access Protocol
1066 //
1067 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1068 ASSERT_EFI_ERROR (Status);
1069
1070 //
1071 // Get SMRAM information
1072 //
1073 Size = 0;
1074 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1075 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1076
1077 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1078 ASSERT (mSmmCpuSmramRanges != NULL);
1079
1080 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1081 ASSERT_EFI_ERROR (Status);
1082
1083 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1084
1085 //
1086 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1087 //
1088 CurrentSmramRange = NULL;
1089 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1090 //
1091 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1092 //
1093 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1094 continue;
1095 }
1096
1097 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1098 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1099 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1100 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1101 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1102 }
1103 }
1104 }
1105 }
1106
1107 ASSERT (CurrentSmramRange != NULL);
1108
1109 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1110 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1111
1112 do {
1113 Found = FALSE;
1114 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1115 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
1116 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
1117 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1118 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1119 Found = TRUE;
1120 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
1121 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1122 Found = TRUE;
1123 }
1124 }
1125 } while (Found);
1126
1127 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1128 }
1129
1130 /**
1131 Configure SMM Code Access Check feature on an AP.
1132 SMM Feature Control MSR will be locked after configuration.
1133
1134 @param[in,out] Buffer Pointer to private data buffer.
1135 **/
1136 VOID
1137 EFIAPI
1138 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1139 IN OUT VOID *Buffer
1140 )
1141 {
1142 UINTN CpuIndex;
1143 UINT64 SmmFeatureControlMsr;
1144 UINT64 NewSmmFeatureControlMsr;
1145
1146 //
1147 // Retrieve the CPU Index from the context passed in
1148 //
1149 CpuIndex = *(UINTN *)Buffer;
1150
1151 //
1152 // Get the current SMM Feature Control MSR value
1153 //
1154 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1155
1156 //
1157 // Compute the new SMM Feature Control MSR value
1158 //
1159 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1160 if (mSmmCodeAccessCheckEnable) {
1161 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1162 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1163 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1164 }
1165 }
1166
1167 //
1168 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1169 //
1170 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1171 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1172 }
1173
1174 //
1175 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1176 //
1177 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1178 }
1179
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // Note: ConfigSmmCodeAccessCheckOnCurrentProcessor() itself releases the
  // spin lock acquired above before returning.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  // Index is reused as both the loop counter and the CpuIndex argument handed
  // to the AP procedure; this is safe because the acquire/release handshake
  // below keeps at most one AP configuration in flight at a time.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      // Acquiring it here proves the AP has finished its MSR update.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1263
1264 /**
1265 This API provides a way to allocate memory for page table.
1266
1267 This API can be called more once to allocate memory for page tables.
1268
1269 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1270 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1271 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1272 returned.
1273
1274 @param Pages The number of 4 KB pages to allocate.
1275
1276 @return A pointer to the allocated buffer or NULL if allocation fails.
1277
1278 **/
1279 VOID *
1280 AllocatePageTableMemory (
1281 IN UINTN Pages
1282 )
1283 {
1284 VOID *Buffer;
1285
1286 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1287 if (Buffer != NULL) {
1288 return Buffer;
1289 }
1290 return AllocatePages (Pages);
1291 }
1292
1293 /**
1294 Allocate pages for code.
1295
1296 @param[in] Pages Number of pages to be allocated.
1297
1298 @return Allocated memory.
1299 **/
1300 VOID *
1301 AllocateCodePages (
1302 IN UINTN Pages
1303 )
1304 {
1305 EFI_STATUS Status;
1306 EFI_PHYSICAL_ADDRESS Memory;
1307
1308 if (Pages == 0) {
1309 return NULL;
1310 }
1311
1312 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1313 if (EFI_ERROR (Status)) {
1314 return NULL;
1315 }
1316 return (VOID *) (UINTN) Memory;
1317 }
1318
1319 /**
1320 Allocate aligned pages for code.
1321
1322 @param[in] Pages Number of pages to be allocated.
1323 @param[in] Alignment The requested alignment of the allocation.
1324 Must be a power of two.
1325 If Alignment is zero, then byte alignment is used.
1326
1327 @return Allocated memory.
1328 **/
1329 VOID *
1330 AllocateAlignedCodePages (
1331 IN UINTN Pages,
1332 IN UINTN Alignment
1333 )
1334 {
1335 EFI_STATUS Status;
1336 EFI_PHYSICAL_ADDRESS Memory;
1337 UINTN AlignedMemory;
1338 UINTN AlignmentMask;
1339 UINTN UnalignedPages;
1340 UINTN RealPages;
1341
1342 //
1343 // Alignment must be a power of two or zero.
1344 //
1345 ASSERT ((Alignment & (Alignment - 1)) == 0);
1346
1347 if (Pages == 0) {
1348 return NULL;
1349 }
1350 if (Alignment > EFI_PAGE_SIZE) {
1351 //
1352 // Calculate the total number of pages since alignment is larger than page size.
1353 //
1354 AlignmentMask = Alignment - 1;
1355 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1356 //
1357 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1358 //
1359 ASSERT (RealPages > Pages);
1360
1361 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1362 if (EFI_ERROR (Status)) {
1363 return NULL;
1364 }
1365 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1366 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1367 if (UnalignedPages > 0) {
1368 //
1369 // Free first unaligned page(s).
1370 //
1371 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1372 ASSERT_EFI_ERROR (Status);
1373 }
1374 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1375 UnalignedPages = RealPages - Pages - UnalignedPages;
1376 if (UnalignedPages > 0) {
1377 //
1378 // Free last unaligned page(s).
1379 //
1380 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1381 ASSERT_EFI_ERROR (Status);
1382 }
1383 } else {
1384 //
1385 // Do not over-allocate pages in this case.
1386 //
1387 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1388 if (EFI_ERROR (Status)) {
1389 return NULL;
1390 }
1391 AlignedMemory = (UINTN) Memory;
1392 }
1393 return (VOID *) AlignedMemory;
1394 }
1395
1396 /**
1397 Perform the remaining tasks.
1398
1399 **/
1400 VOID
1401 PerformRemainingTasks (
1402 VOID
1403 )
1404 {
1405 if (mSmmReadyToLock) {
1406 //
1407 // Start SMM Profile feature
1408 //
1409 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1410 SmmProfileStart ();
1411 }
1412 //
1413 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1414 //
1415 InitPaging ();
1416
1417 //
1418 // Mark critical region to be read-only in page table
1419 //
1420 SetMemMapAttributes ();
1421
1422 //
1423 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1424 //
1425 SetUefiMemMapAttributes ();
1426
1427 //
1428 // Set page table itself to be read-only
1429 //
1430 SetPageTableAttributes ();
1431
1432 //
1433 // Configure SMM Code Access Check feature if available.
1434 //
1435 ConfigSmmCodeAccessCheck ();
1436
1437 SmmCpuFeaturesCompleteSmmReadyToLock ();
1438
1439 //
1440 // Clean SMM ready to lock flag
1441 //
1442 mSmmReadyToLock = FALSE;
1443 }
1444 }
1445
/**
  Perform the pre tasks.

  Currently the only pre task is restoring the SMM configuration via
  RestoreSmmConfigurationInS3().
  NOTE(review): presumably only takes effect on the S3 resume path —
  confirm against RestoreSmmConfigurationInS3()'s implementation.
**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}