UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // SMM CPU Private Data structure that contains SMM Configuration Protocol
21 // along with its supporting fields.
22 //
23 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
25 NULL, // SmmCpuHandle
26 NULL, // Pointer to ProcessorInfo array
27 NULL, // Pointer to Operation array
28 NULL, // Pointer to CpuSaveStateSize array
29 NULL, // Pointer to CpuSaveState array
30 { {0} }, // SmmReservedSmramRegion
31 {
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
34 0, // SmmCoreEntryContext.NumberOfCpus
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize
36 NULL // SmmCoreEntryContext.CpuSaveState
37 },
38 NULL, // SmmCoreEntry
39 {
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
42 },
43 };
44
45 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
47 0, // Array Length of SmBase and APIC ID
48 NULL, // Pointer to APIC ID array
49 NULL, // Pointer to SMBASE array
50 0, // Reserved
51 0, // SmrrBase
52 0 // SmrrSize
53 };
54
55 //
56 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
57 //
58 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
59
60 //
61 // SMM Relocation variables
62 //
63 volatile BOOLEAN *mRebased;
64 volatile BOOLEAN mIsBsp;
65
66 ///
67 /// Handle for the SMM CPU Protocol
68 ///
69 EFI_HANDLE mSmmCpuHandle = NULL;
70
71 ///
72 /// SMM CPU Protocol instance
73 ///
74 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
75 SmmReadSaveState,
76 SmmWriteSaveState
77 };
78
79 ///
80 /// SMM Memory Attribute Protocol instance
81 ///
82 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
83 EdkiiSmmGetMemoryAttributes,
84 EdkiiSmmSetMemoryAttributes,
85 EdkiiSmmClearMemoryAttributes
86 };
87
88 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
89
90 //
91 // SMM stack information
92 //
93 UINTN mSmmStackArrayBase;
94 UINTN mSmmStackArrayEnd;
95 UINTN mSmmStackSize;
96
97 UINTN mMaxNumberOfCpus = 1;
98 UINTN mNumberOfCpus = 1;
99
100 //
101 // SMM ready to lock flag
102 //
103 BOOLEAN mSmmReadyToLock = FALSE;
104
105 //
106 // Global used to cache PCD for SMM Code Access Check enable
107 //
108 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
109
110 //
111 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
112 //
113 UINT64 mAddressEncMask = 0;
114
115 //
116 // Spin lock used to serialize setting of SMM Code Access Check feature
117 //
118 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
119
120 //
121 // Saved SMM ranges information
122 //
123 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
124 UINTN mSmmCpuSmramRangeCount;
125
126 UINT8 mPhysicalAddressBits;
127
128 //
129 // Control register contents saved for SMM S3 resume state initialization.
130 //
131 UINT32 mSmmCr0;
132 UINT32 mSmmCr4;
133
134 /**
135 Initialize IDT to setup exception handlers for SMM.
136
137 **/
138 VOID
139 InitializeSmmIdt (
140 VOID
141 )
142 {
143 EFI_STATUS Status;
144 BOOLEAN InterruptState;
145 IA32_DESCRIPTOR DxeIdtr;
146
147 //
148 // There are only 32 entries (rather than the full 256) since only processor
149 // generated exceptions will be handled.
150 //
151 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
152 //
153 // Allocate page aligned IDT, because it might be set as read only.
154 //
155 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
156 ASSERT (gcSmiIdtr.Base != 0);
157 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
158
159 //
160 // Disable Interrupt and save DXE IDT table
161 //
162 InterruptState = SaveAndDisableInterrupts ();
163 AsmReadIdtr (&DxeIdtr);
164 //
165 // Load SMM temporary IDT table
166 //
167 AsmWriteIdtr (&gcSmiIdtr);
168 //
169 // Set up SMM default exception handlers; the SMM IDT table
170 // will be updated and saved in gcSmiIdtr
171 //
172 Status = InitializeCpuExceptionHandlers (NULL);
173 ASSERT_EFI_ERROR (Status);
174 //
175 // Restore DXE IDT table and CPU interrupt
176 //
177 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
178 SetInterruptState (InterruptState);
179 }
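//
// Illustrative arithmetic (sketch only, assuming the IA32 build where
// sizeof (IA32_IDT_GATE_DESCRIPTOR) is 8 bytes) for the Limit/page computation
// in InitializeSmmIdt() above:
//
//   gcSmiIdtr.Limit = (8 * 32) - 1 = 0xFF
//   EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1) = EFI_SIZE_TO_PAGES (0x100) = 1
//
// so the 32-entry SMM IDT fits in a single 4 KB code page; on X64, where the
// gate descriptor is 16 bytes, the Limit is 0x1FF and it still fits in one page.
//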
180
181 /**
182 Find the module that contains the input instruction pointer and print its name.
183
184 @param CallerIpAddress Caller instruction pointer.
185
186 **/
187 VOID
188 DumpModuleInfoByIp (
189 IN UINTN CallerIpAddress
190 )
191 {
192 UINTN Pe32Data;
193 VOID *PdbPointer;
194
195 //
196 // Find Image Base
197 //
198 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
199 if (Pe32Data != 0) {
200 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
202 if (PdbPointer != NULL) {
203 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
204 }
205 }
206 }
207
208 /**
209 Read information from the CPU save state.
210
211 @param This EFI_SMM_CPU_PROTOCOL instance
212 @param Width The number of bytes to read from the CPU save state.
213 @param Register Specifies the CPU register to read from the save state.
214 @param CpuIndex Specifies the zero-based index of the CPU save state.
215 @param Buffer Upon return, this holds the CPU register value read from the save state.
216
217 @retval EFI_SUCCESS The register was read from Save State
218 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
219 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
220
221 **/
222 EFI_STATUS
223 EFIAPI
224 SmmReadSaveState (
225 IN CONST EFI_SMM_CPU_PROTOCOL *This,
226 IN UINTN Width,
227 IN EFI_SMM_SAVE_STATE_REGISTER Register,
228 IN UINTN CpuIndex,
229 OUT VOID *Buffer
230 )
231 {
232 EFI_STATUS Status;
233
234 //
235 // Retrieve pointer to the specified CPU's SMM Save State buffer
236 //
237 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
238 return EFI_INVALID_PARAMETER;
239 }
240 //
241 // The AsmLfence() call here ensures that the above check of CpuIndex
242 // has completed before any subsequent code executes.
243 //
244 AsmLfence ();
245
246 //
247 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
248 //
249 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
250 //
251 // The pseudo-register only supports the 64-bit size specified by Width.
252 //
253 if (Width != sizeof (UINT64)) {
254 return EFI_INVALID_PARAMETER;
255 }
256 //
257 // If the processor is in SMM at the time the SMI occurred,
258 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
259 // Otherwise, EFI_NOT_FOUND is returned.
260 //
261 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
262 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
263 return EFI_SUCCESS;
264 } else {
265 return EFI_NOT_FOUND;
266 }
267 }
268
269 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
270 return EFI_INVALID_PARAMETER;
271 }
272
273 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
274 if (Status == EFI_UNSUPPORTED) {
275 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
276 }
277 return Status;
278 }
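//
// Commented-out usage sketch (hypothetical caller, not part of this module):
// reading the EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID pseudo-register through
// the protocol instance defined in this file. Width must be sizeof (UINT64),
// and CpuIndex must name a processor that is present in SMM.
//
//   UINT64      ProcessorId;
//   EFI_STATUS  Status;
//
//   Status = mSmmCpu.ReadSaveState (
//              &mSmmCpu,
//              sizeof (UINT64),
//              EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID,
//              CpuIndex,            // assumed to be a valid zero-based CPU index
//              &ProcessorId
//              );
//   if (!EFI_ERROR (Status)) {
//     DEBUG ((DEBUG_INFO, "CPU[%d] ProcessorId = 0x%lx\n", CpuIndex, ProcessorId));
//   }
//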
279
280 /**
281 Write data to the CPU save state.
282
283 @param This EFI_SMM_CPU_PROTOCOL instance
284 @param Width The number of bytes to write to the CPU save state.
285 @param Register Specifies the CPU register to write to the save state.
286 @param CpuIndex Specifies the zero-based index of the CPU save state
287 @param Buffer Upon entry, this holds the new CPU register value.
288
289 @retval EFI_SUCCESS The register was written to the save state.
290 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor.
291 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct.
292
293 **/
294 EFI_STATUS
295 EFIAPI
296 SmmWriteSaveState (
297 IN CONST EFI_SMM_CPU_PROTOCOL *This,
298 IN UINTN Width,
299 IN EFI_SMM_SAVE_STATE_REGISTER Register,
300 IN UINTN CpuIndex,
301 IN CONST VOID *Buffer
302 )
303 {
304 EFI_STATUS Status;
305
306 //
307 // Retrieve pointer to the specified CPU's SMM Save State buffer
308 //
309 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
310 return EFI_INVALID_PARAMETER;
311 }
312
313 //
314 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
315 //
316 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
317 return EFI_SUCCESS;
318 }
319
320 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
321 return EFI_INVALID_PARAMETER;
322 }
323
324 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
325 if (Status == EFI_UNSUPPORTED) {
326 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
327 }
328 return Status;
329 }
330
331
332 /**
333 C function for the SMI handler used to change each processor's SMBASE register.
334
335 **/
336 VOID
337 EFIAPI
338 SmmInitHandler (
339 VOID
340 )
341 {
342 UINT32 ApicId;
343 UINTN Index;
344
345 //
346 // Update SMM IDT entries' code segment and load IDT
347 //
348 AsmWriteIdtr (&gcSmiIdtr);
349 ApicId = GetApicId ();
350
351 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
352
353 for (Index = 0; Index < mNumberOfCpus; Index++) {
354 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
355 //
356 // Initialize SMM specific features on the currently executing CPU
357 //
358 SmmCpuFeaturesInitializeProcessor (
359 Index,
360 mIsBsp,
361 gSmmCpuPrivate->ProcessorInfo,
362 &mCpuHotPlugData
363 );
364
365 if (!mSmmS3Flag) {
366 //
367 // Check XD and BTS features on each processor on normal boot
368 //
369 CheckFeatureSupported ();
370 }
371
372 if (mIsBsp) {
373 //
374 // BSP rebase is already done above.
375 // Initialize private data during S3 resume
376 //
377 InitializeMpSyncData ();
378 }
379
380 //
381 // Hook return after RSM to set SMM re-based flag
382 //
383 SemaphoreHook (Index, &mRebased[Index]);
384
385 return;
386 }
387 }
388 ASSERT (FALSE);
389 }
390
391 /**
392 Relocate SmmBases for each processor.
393
394 Executed on first boot and on all S3 resumes
395
396 **/
397 VOID
398 EFIAPI
399 SmmRelocateBases (
400 VOID
401 )
402 {
403 UINT8 BakBuf[BACK_BUF_SIZE];
404 SMRAM_SAVE_STATE_MAP BakBuf2;
405 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
406 UINT8 *U8Ptr;
407 UINT32 ApicId;
408 UINTN Index;
409 UINTN BspIndex;
410
411 //
412 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
413 //
414 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
415
416 //
417 // Patch ASM code template with current CR0, CR3, and CR4 values
418 //
419 mSmmCr0 = (UINT32)AsmReadCr0 ();
420 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
421 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
422 mSmmCr4 = (UINT32)AsmReadCr4 ();
423 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4, 4);
424
425 //
426 // Patch GDTR for SMM base relocation
427 //
428 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
429 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
430
431 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
432 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
433
434 //
435 // Backup original contents at address 0x38000
436 //
437 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
438 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
439
440 //
441 // Load image for relocation
442 //
443 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
444
445 //
446 // Retrieve the local APIC ID of current processor
447 //
448 ApicId = GetApicId ();
449
450 //
451 // Relocate SM bases for all APs
452 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
453 //
454 mIsBsp = FALSE;
455 BspIndex = (UINTN)-1;
456 for (Index = 0; Index < mNumberOfCpus; Index++) {
457 mRebased[Index] = FALSE;
458 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
459 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
460 //
461 // Wait for this AP to finish its 1st SMI
462 //
463 while (!mRebased[Index]);
464 } else {
465 //
466 // BSP will be relocated later
467 //
468 BspIndex = Index;
469 }
470 }
471
472 //
473 // Relocate BSP's SMM base
474 //
475 ASSERT (BspIndex != (UINTN)-1);
476 mIsBsp = TRUE;
477 SendSmiIpi (ApicId);
478 //
479 // Wait for the BSP to finish its 1st SMI
480 //
481 while (!mRebased[BspIndex]);
482
483 //
484 // Restore contents at address 0x38000
485 //
486 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
487 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
488 }
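//
// Illustrative summary (sketch only) of the relocation handshake implemented
// above, for one AP with index i:
//
//   BSP                                  AP[i]
//   ---                                  -----
//   mRebased[i] = FALSE;
//   SendSmiIpi (ApicId[i]);      ----->  enters SMM at the default SMBASE (0x38000),
//                                        executes gcSmmInitTemplate -> SmmInitHandler(),
//                                        which relocates SMBASE and, via SemaphoreHook(),
//                                        arranges for mRebased[i] = TRUE after RSM
//   while (!mRebased[i]);        <-----  BSP observes the flag once the AP has rebased
//
// The BSP relocates itself last by sending an SMI IPI to its own APIC ID and
// waiting on mRebased[BspIndex] in the same way.
//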
489
490 /**
491 SMM Ready To Lock event notification handler.
492
493 The CPU S3 data is copied to SMRAM for security, and mSmmReadyToLock is set so that
494 the additional lock actions that must be performed from SMM are done on the next SMI.
495
496 @param[in] Protocol Points to the protocol's unique identifier.
497 @param[in] Interface Points to the interface instance.
498 @param[in] Handle The handle on which the interface was installed.
499
500 @retval EFI_SUCCESS Notification handler runs successfully.
501 **/
502 EFI_STATUS
503 EFIAPI
504 SmmReadyToLockEventNotify (
505 IN CONST EFI_GUID *Protocol,
506 IN VOID *Interface,
507 IN EFI_HANDLE Handle
508 )
509 {
510 GetAcpiCpuData ();
511
512 //
513 // Cache a copy of UEFI memory map before we start profiling feature.
514 //
515 GetUefiMemoryMap ();
516
517 //
518 // Set SMM ready to lock flag and return
519 //
520 mSmmReadyToLock = TRUE;
521 return EFI_SUCCESS;
522 }
523
524 /**
525 The module Entry Point of the CPU SMM driver.
526
527 @param ImageHandle The firmware allocated handle for the EFI image.
528 @param SystemTable A pointer to the EFI System Table.
529
530 @retval EFI_SUCCESS The entry point is executed successfully.
531 @retval Other Some error occurs when executing this entry point.
532
533 **/
534 EFI_STATUS
535 EFIAPI
536 PiCpuSmmEntry (
537 IN EFI_HANDLE ImageHandle,
538 IN EFI_SYSTEM_TABLE *SystemTable
539 )
540 {
541 EFI_STATUS Status;
542 EFI_MP_SERVICES_PROTOCOL *MpServices;
543 UINTN NumberOfEnabledProcessors;
544 UINTN Index;
545 VOID *Buffer;
546 UINTN BufferPages;
547 UINTN TileCodeSize;
548 UINTN TileDataSize;
549 UINTN TileSize;
550 UINT8 *Stacks;
551 VOID *Registration;
552 UINT32 RegEax;
553 UINT32 RegEdx;
554 UINTN FamilyId;
555 UINTN ModelId;
556 UINT32 Cr3;
557
558 //
559 // Initialize address fixup
560 //
561 PiSmmCpuSmmInitFixupAddress ();
562 PiSmmCpuSmiEntryFixupAddress ();
563
564 //
565 // Initialize Debug Agent to support source level debug in SMM code
566 //
567 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
568
569 //
570 // Report the start of CPU SMM initialization.
571 //
572 REPORT_STATUS_CODE (
573 EFI_PROGRESS_CODE,
574 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
575 );
576
577 //
578 // Find out SMRR Base and SMRR Size
579 //
580 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
581
582 //
583 // Get MP Services Protocol
584 //
585 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
586 ASSERT_EFI_ERROR (Status);
587
588 //
589 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
590 //
591 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
592 ASSERT_EFI_ERROR (Status);
593 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
594
595 //
596 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
597 // A constant BSP index makes no sense because the BSP may be hot removed.
598 //
599 DEBUG_CODE (
600 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
601
602 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
603 }
604 );
605
606 //
607 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
608 //
609 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
610 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
611
612 //
613 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
614 // Make sure AddressEncMask is contained to smallest supported address field.
615 //
616 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
617 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
618
619 //
620 // If CPU hot plug is supported, we need to allocate resources for processors that may be hot added
621 //
622 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
623 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
624 } else {
625 mMaxNumberOfCpus = mNumberOfCpus;
626 }
627 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
628
629 //
630 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
631 // allocated buffer. The minimum size of this buffer for a uniprocessor system
632 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state
633 // area is just below SMBASE + 64KB. If more than one CPU is present in the platform,
634 // then the SMI entry point and the CPU save state areas can be tiled to minimize
635 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
636 // by adding the CPU save state size, any extra CPU specific context, and
637 // the size of code that must be placed at the SMI entry point to transfer
638 // control to a C function in the native SMM execution mode. This size is
639 // rounded up to the nearest power of 2 to give the tile size for each CPU.
640 // The total amount of memory required is the maximum number of CPUs that the
641 // platform supports times the tile size. The picture below shows the tiling,
642 // where m is the number of tiles that fit in 32KB.
643 //
644 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
645 // | CPU m+1 Save State |
646 // +-----------------------------+
647 // | CPU m+1 Extra Data |
648 // +-----------------------------+
649 // | Padding |
650 // +-----------------------------+
651 // | CPU 2m SMI Entry |
652 // +#############################+ <-- Base of allocated buffer + 64 KB
653 // | CPU m-1 Save State |
654 // +-----------------------------+
655 // | CPU m-1 Extra Data |
656 // +-----------------------------+
657 // | Padding |
658 // +-----------------------------+
659 // | CPU 2m-1 SMI Entry |
660 // +=============================+ <-- 2^n offset from Base of allocated buffer
661 // | . . . . . . . . . . . . |
662 // +=============================+ <-- 2^n offset from Base of allocated buffer
663 // | CPU 2 Save State |
664 // +-----------------------------+
665 // | CPU 2 Extra Data |
666 // +-----------------------------+
667 // | Padding |
668 // +-----------------------------+
669 // | CPU m+1 SMI Entry |
670 // +=============================+ <-- Base of allocated buffer + 32 KB
671 // | CPU 1 Save State |
672 // +-----------------------------+
673 // | CPU 1 Extra Data |
674 // +-----------------------------+
675 // | Padding |
676 // +-----------------------------+
677 // | CPU m SMI Entry |
678 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
679 // | CPU 0 Save State |
680 // +-----------------------------+
681 // | CPU 0 Extra Data |
682 // +-----------------------------+
683 // | Padding |
684 // +-----------------------------+
685 // | CPU m-1 SMI Entry |
686 // +=============================+ <-- 2^n offset from Base of allocated buffer
687 // | . . . . . . . . . . . . |
688 // +=============================+ <-- 2^n offset from Base of allocated buffer
689 // | Padding |
690 // +-----------------------------+
691 // | CPU 1 SMI Entry |
692 // +=============================+ <-- 2^n offset from Base of allocated buffer
693 // | Padding |
694 // +-----------------------------+
695 // | CPU 0 SMI Entry |
696 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
697 //
698
699 //
700 // Retrieve CPU Family
701 //
702 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
703 FamilyId = (RegEax >> 8) & 0xf;
704 ModelId = (RegEax >> 4) & 0xf;
705 if (FamilyId == 0x06 || FamilyId == 0x0f) {
706 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
707 }
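//
// Worked example (illustrative value only) of the family/model decoding above,
// assuming CPUID.01h:EAX returned 0x000306A9:
//
//   FamilyId = (0x000306A9 >> 8) & 0xf           = 0x06
//   ModelId  = (0x000306A9 >> 4) & 0xf           = 0x0A
//   ModelId |= ((0x000306A9 >> 12) & 0xf0)       = 0x0A | 0x30 = 0x3A
//
// The extended model bits are folded in only because FamilyId is 0x06.
//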
708
709 RegEdx = 0;
710 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
711 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
712 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
713 }
714 //
715 // Determine the mode of the CPU at the time an SMI occurs
716 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
717 // Volume 3C, Section 34.4.1.1
718 //
719 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
720 if ((RegEdx & BIT29) != 0) {
721 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
722 }
723 if (FamilyId == 0x06) {
724 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
725 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
726 }
727 }
728
729 //
730 // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map, the extra CPU
731 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
732 // This size is rounded up to the nearest power of 2.
733 //
734 TileCodeSize = GetSmiHandlerSize ();
735 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
736 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
737 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
738 TileSize = TileDataSize + TileCodeSize - 1;
739 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
740 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
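//
// Worked example (hypothetical sizes, for illustration only): if
// GetSmiHandlerSize () returned 0x800 and the save-state/PSD layout gave
// TileDataSize = 0x500 before alignment, the computation above yields
//
//   TileCodeSize = ALIGN_VALUE (0x800, SIZE_4KB)              = 0x1000
//   TileDataSize = ALIGN_VALUE (0x500, SIZE_4KB)              = 0x1000
//   TileSize     = 0x1000 + 0x1000 - 1                        = 0x1FFF
//   TileSize     = 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000  = 0x2000
//
// i.e. each CPU would consume an 8 KB tile in the allocated buffer.
//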
741
742 //
743 // If the TileSize is larger than space available for the SMI Handler of
744 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
745 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
746 // the SMI Handler size must be reduced or the size of the extra CPU specific
747 // context must be reduced.
748 //
749 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
750
751 //
752 // Allocate buffer for all of the tiles.
753 //
754 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
755 // Volume 3C, Section 34.11 SMBASE Relocation
756 // For Pentium and Intel486 processors, the SMBASE values must be
757 // aligned on a 32-KByte boundary or the processor will enter shutdown
758 // state during the execution of a RSM instruction.
759 //
760 // Intel486 processors: FamilyId is 4
761 // Pentium processors : FamilyId is 5
762 //
763 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
764 if ((FamilyId == 4) || (FamilyId == 5)) {
765 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
766 } else {
767 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
768 }
769 ASSERT (Buffer != NULL);
770 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
771
772 //
773 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
774 //
775 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
776 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
777
778 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
779 ASSERT (gSmmCpuPrivate->Operation != NULL);
780
781 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
782 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
783
784 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
785 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
786
787 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
788 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
789
790 //
791 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
792 //
793 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
794 ASSERT (mCpuHotPlugData.ApicId != NULL);
795 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
796 ASSERT (mCpuHotPlugData.SmBase != NULL);
797 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
798
799 //
800 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
801 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
802 // size for each CPU in the platform
803 //
804 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
805 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
806 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
807 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
808 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
809
810 if (Index < mNumberOfCpus) {
811 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
812 ASSERT_EFI_ERROR (Status);
813 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
814
815 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
816 Index,
817 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
818 mCpuHotPlugData.SmBase[Index],
819 gSmmCpuPrivate->CpuSaveState[Index],
820 gSmmCpuPrivate->CpuSaveStateSize[Index]
821 ));
822 } else {
823 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
824 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
825 }
826 }
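//
// Illustrative arithmetic (assumed example values) for the SMBASE computation
// in the loop above: with Buffer at 0x7F000000, TileSize = 0x2000, and
// SMM_HANDLER_OFFSET = 0x8000,
//
//   SmBase[0] = 0x7F000000 + 0 * 0x2000 - 0x8000 = 0x7EFF8000
//   SmBase[1] = 0x7F000000 + 1 * 0x2000 - 0x8000 = 0x7EFFA000
//
// so SmBase[Index] + SMM_HANDLER_OFFSET (the SMI entry point) lands exactly at
// the start of CPU[Index]'s tile inside Buffer, and the save state map sits at
// SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET.
//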
827
828 //
829 // Allocate SMI stacks for all processors.
830 //
831 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
832 //
833 // Two more pages are allocated for each processor:
834 // one is a guard page and the other is a known good stack.
835 //
836 // +-------------------------------------------+-----+-------------------------------------------+
837 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
838 // +-------------------------------------------+-----+-------------------------------------------+
839 // | | | |
840 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
841 //
842 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
843 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
844 ASSERT (Stacks != NULL);
845 mSmmStackArrayBase = (UINTN)Stacks;
846 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
847 } else {
848 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
849 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
850 ASSERT (Stacks != NULL);
851 }
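//
// Worked example (assuming PcdCpuSmmStackSize = 0x2000) for the stack-guard
// layout described above:
//
//   mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (0x2000) + 2)
//                 = EFI_PAGES_TO_SIZE (4) = 0x4000
//
// so each processor owns a 16 KB region: one known good stack page, one guard
// page, and the 8 KB SMM stack itself, matching the diagram in the comment.
//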
852
853 //
854 // Set SMI stack for SMM base relocation
855 //
856 PatchInstructionX86 (
857 gPatchSmmInitStack,
858 (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),
859 sizeof (UINTN)
860 );
861
862 //
863 // Initialize IDT
864 //
865 InitializeSmmIdt ();
866
867 //
868 // Relocate SMM Base addresses to the ones allocated from SMRAM
869 //
870 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
871 ASSERT (mRebased != NULL);
872 SmmRelocateBases ();
873
874 //
875 // Call hook for BSP to perform extra actions in normal mode after all
876 // SMM base addresses have been relocated on all CPUs
877 //
878 SmmCpuFeaturesSmmRelocationComplete ();
879
880 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
881
882 //
883 // SMM Time initialization
884 //
885 InitializeSmmTimer ();
886
887 //
888 // Initialize MP globals
889 //
890 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
891
892 //
893 // Fill in SMM Reserved Regions
894 //
895 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
896 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
897
898 //
899 // Install the SMM Configuration Protocol onto a new handle on the handle database.
900 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
901 // to an SMRAM address will be present in the handle database
902 //
903 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
904 &gSmmCpuPrivate->SmmCpuHandle,
905 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
906 NULL
907 );
908 ASSERT_EFI_ERROR (Status);
909
910 //
911 // Install the SMM CPU Protocol into SMM protocol database
912 //
913 Status = gSmst->SmmInstallProtocolInterface (
914 &mSmmCpuHandle,
915 &gEfiSmmCpuProtocolGuid,
916 EFI_NATIVE_INTERFACE,
917 &mSmmCpu
918 );
919 ASSERT_EFI_ERROR (Status);
920
921 //
922 // Install the SMM Memory Attribute Protocol into SMM protocol database
923 //
924 Status = gSmst->SmmInstallProtocolInterface (
925 &mSmmCpuHandle,
926 &gEdkiiSmmMemoryAttributeProtocolGuid,
927 EFI_NATIVE_INTERFACE,
928 &mSmmMemoryAttribute
929 );
930 ASSERT_EFI_ERROR (Status);
931
932 //
933 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
934 //
935 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
936 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
937 ASSERT_EFI_ERROR (Status);
938 }
939
940 //
941 // Initialize SMM CPU Services Support
942 //
943 Status = InitializeSmmCpuServices (mSmmCpuHandle);
944 ASSERT_EFI_ERROR (Status);
945
946 //
947 // register SMM Ready To Lock Protocol notification
948 //
949 Status = gSmst->SmmRegisterProtocolNotify (
950 &gEfiSmmReadyToLockProtocolGuid,
951 SmmReadyToLockEventNotify,
952 &Registration
953 );
954 ASSERT_EFI_ERROR (Status);
955
956 //
957 // Initialize SMM Profile feature
958 //
959 InitSmmProfile (Cr3);
960
961 GetAcpiS3EnableFlag ();
962 InitSmmS3ResumeState (Cr3);
963
964 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
965
966 return EFI_SUCCESS;
967 }
968
969 /**
970
971 Find out SMRAM information including SMRR base and SMRR size.
972
973 @param SmrrBase SMRR base
974 @param SmrrSize SMRR size
975
976 **/
977 VOID
978 FindSmramInfo (
979 OUT UINT32 *SmrrBase,
980 OUT UINT32 *SmrrSize
981 )
982 {
983 EFI_STATUS Status;
984 UINTN Size;
985 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
986 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
987 UINTN Index;
988 UINT64 MaxSize;
989 BOOLEAN Found;
990
991 //
992 // Get SMM Access Protocol
993 //
994 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
995 ASSERT_EFI_ERROR (Status);
996
997 //
998 // Get SMRAM information
999 //
1000 Size = 0;
1001 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1002 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1003
1004 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1005 ASSERT (mSmmCpuSmramRanges != NULL);
1006
1007 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1008 ASSERT_EFI_ERROR (Status);
1009
1010 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1011
1012 //
1013 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1014 //
1015 CurrentSmramRange = NULL;
1016 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1017 //
1018 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1019 //
1020 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1021 continue;
1022 }
1023
1024 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1025 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1026 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1027 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1028 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1029 }
1030 }
1031 }
1032 }
1033
1034 ASSERT (CurrentSmramRange != NULL);
1035
1036 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1037 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1038
1039 do {
1040 Found = FALSE;
1041 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1042 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
1043 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
1044 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1045 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1046 Found = TRUE;
1047 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
1048 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1049 Found = TRUE;
1050 }
1051 }
1052 } while (Found);
1053
1054 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1055 }
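//
// Illustrative example (hypothetical descriptors) of the merge loop above.
// Suppose GetCapabilities () reported three open SMRAM ranges and the largest
// one, [0x7F000000, +0x800000), was selected as the initial SMRR window:
//
//   [0x7E800000, +0x800000)  ends exactly at *SmrrBase             -> merged below
//   [0x7F800000, +0x400000)  starts exactly at *SmrrBase+*SmrrSize -> merged above
//
// After the do/while loop the reported window is Base = 0x7E800000,
// Size = 0x1400000, i.e. all adjacent open SMRAM has been folded into one range.
//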
1056
1057 /**
1058 Configure SMM Code Access Check feature on an AP.
1059 SMM Feature Control MSR will be locked after configuration.
1060
1061 @param[in,out] Buffer Pointer to private data buffer.
1062 **/
1063 VOID
1064 EFIAPI
1065 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1066 IN OUT VOID *Buffer
1067 )
1068 {
1069 UINTN CpuIndex;
1070 UINT64 SmmFeatureControlMsr;
1071 UINT64 NewSmmFeatureControlMsr;
1072
1073 //
1074 // Retrieve the CPU Index from the context passed in
1075 //
1076 CpuIndex = *(UINTN *)Buffer;
1077
1078 //
1079 // Get the current SMM Feature Control MSR value
1080 //
1081 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1082
1083 //
1084 // Compute the new SMM Feature Control MSR value
1085 //
1086 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1087 if (mSmmCodeAccessCheckEnable) {
1088 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1089 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1090 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1091 }
1092 }
1093
1094 //
1095 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1096 //
1097 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1098 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1099 }
1100
1101 //
1102 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1103 //
1104 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1105 }
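//
// Illustrative sketch (assumed starting value) of the MSR update above, when
// both mSmmCodeAccessCheckEnable and PcdCpuSmmFeatureControlMsrLock are TRUE:
//
//   SmmFeatureControlMsr    = 0x0 (assumed current value)
//   NewSmmFeatureControlMsr = 0x0 | SMM_CODE_CHK_EN_BIT | SMM_FEATURE_CONTROL_LOCK_BIT
//
// Because the new value differs from the current one, SmmCpuFeaturesSetSmmRegister()
// is called once; once the lock bit is set, the feature control register can no
// longer be modified until the next reset.
//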
1106
1107 /**
1108 Configure SMM Code Access Check feature for all processors.
1109 SMM Feature Control MSR will be locked after configuration.
1110 **/
1111 VOID
1112 ConfigSmmCodeAccessCheck (
1113 VOID
1114 )
1115 {
1116 UINTN Index;
1117 EFI_STATUS Status;
1118
1119 //
1120 // Check to see if the Feature Control MSR is supported on this CPU
1121 //
1122 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1123 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1124 mSmmCodeAccessCheckEnable = FALSE;
1125 return;
1126 }
1127
1128 //
1129 // Check to see if the CPU supports the SMM Code Access Check feature
1130 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1131 //
1132 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1133 mSmmCodeAccessCheckEnable = FALSE;
1134 return;
1135 }
1136
1137 //
1138 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1139 //
1140 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1141
1142 //
1143 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1144 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1145 //
1146 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1147
1148 //
1149 // Enable SMM Code Access Check feature on the BSP.
1150 //
1151 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1152
1153 //
1154 // Enable SMM Code Access Check feature for the APs.
1155 //
1156 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1157 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1158 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1159 //
1160 // If this processor does not exist
1161 //
1162 continue;
1163 }
1164 //
1165 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1166 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1167 //
1168 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1169
1170 //
1171 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1172 //
1173 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1174 ASSERT_EFI_ERROR (Status);
1175
1176 //
1177 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1178 //
1179 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1180 CpuPause ();
1181 }
1182
1183 //
1184 // Release the Config SMM Code Access Check spin lock.
1185 //
1186 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1187 }
1188 }
1189 }
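//
// Sketch (illustrative only) of the spin-lock handshake that the function
// above uses to serialize MSR programming across the BSP and one AP:
//
//   BSP                                           AP
//   ---                                           --
//   AcquireSpinLock (Lock);
//   SmmStartupThisAp (Config..., Index, &Index);
//                                                 ConfigSmmCodeAccessCheckOnCurrentProcessor()
//                                                   programs the MSR, then
//                                                   ReleaseSpinLock (Lock);
//   while (!AcquireSpinLockOrFail (Lock)) {
//     CpuPause ();            // BSP spins until the AP releases the lock
//   }
//   ReleaseSpinLock (Lock);
//
// The single lock therefore guarantees the BSP never starts the next AP before
// the previous one has finished updating its SMM Feature Control MSR.
//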
1190
1191 /**
1192 This API provides a way to allocate memory for page table.
1193
1194 This API can be called more than once to allocate memory for page tables.
1195
1196 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1197 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1198 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1199 returned.
1200
1201 @param Pages The number of 4 KB pages to allocate.
1202
1203 @return A pointer to the allocated buffer or NULL if allocation fails.
1204
1205 **/
1206 VOID *
1207 AllocatePageTableMemory (
1208 IN UINTN Pages
1209 )
1210 {
1211 VOID *Buffer;
1212
1213 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1214 if (Buffer != NULL) {
1215 return Buffer;
1216 }
1217 return AllocatePages (Pages);
1218 }
1219
1220 /**
1221 Allocate pages for code.
1222
1223 @param[in] Pages Number of pages to be allocated.
1224
1225 @return Allocated memory.
1226 **/
1227 VOID *
1228 AllocateCodePages (
1229 IN UINTN Pages
1230 )
1231 {
1232 EFI_STATUS Status;
1233 EFI_PHYSICAL_ADDRESS Memory;
1234
1235 if (Pages == 0) {
1236 return NULL;
1237 }
1238
1239 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1240 if (EFI_ERROR (Status)) {
1241 return NULL;
1242 }
1243 return (VOID *) (UINTN) Memory;
1244 }
1245
1246 /**
1247 Allocate aligned pages for code.
1248
1249 @param[in] Pages Number of pages to be allocated.
1250 @param[in] Alignment The requested alignment of the allocation.
1251 Must be a power of two.
1252 If Alignment is zero, then byte alignment is used.
1253
1254 @return Allocated memory.
1255 **/
1256 VOID *
1257 AllocateAlignedCodePages (
1258 IN UINTN Pages,
1259 IN UINTN Alignment
1260 )
1261 {
1262 EFI_STATUS Status;
1263 EFI_PHYSICAL_ADDRESS Memory;
1264 UINTN AlignedMemory;
1265 UINTN AlignmentMask;
1266 UINTN UnalignedPages;
1267 UINTN RealPages;
1268
1269 //
1270 // Alignment must be a power of two or zero.
1271 //
1272 ASSERT ((Alignment & (Alignment - 1)) == 0);
1273
1274 if (Pages == 0) {
1275 return NULL;
1276 }
1277 if (Alignment > EFI_PAGE_SIZE) {
1278 //
1279 // Calculate the total number of pages since alignment is larger than page size.
1280 //
1281 AlignmentMask = Alignment - 1;
1282 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1283 //
1284 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1285 //
1286 ASSERT (RealPages > Pages);
1287
1288 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1289 if (EFI_ERROR (Status)) {
1290 return NULL;
1291 }
1292 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1293 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1294 if (UnalignedPages > 0) {
1295 //
1296 // Free first unaligned page(s).
1297 //
1298 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1299 ASSERT_EFI_ERROR (Status);
1300 }
1301 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1302 UnalignedPages = RealPages - Pages - UnalignedPages;
1303 if (UnalignedPages > 0) {
1304 //
1305 // Free last unaligned page(s).
1306 //
1307 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1308 ASSERT_EFI_ERROR (Status);
1309 }
1310 } else {
1311 //
1312 // Do not over-allocate pages in this case.
1313 //
1314 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1315 if (EFI_ERROR (Status)) {
1316 return NULL;
1317 }
1318 AlignedMemory = (UINTN) Memory;
1319 }
1320 return (VOID *) AlignedMemory;
1321 }
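//
// Worked example (assumed request) of the over-allocate-and-trim logic above:
// allocating Pages = 2 with Alignment = SIZE_32KB (8 pages):
//
//   RealPages     = 2 + EFI_SIZE_TO_PAGES (SIZE_32KB)           = 10
//   Memory        = 0x7F001000 (assumed, only 4 KB aligned)
//   AlignedMemory = (0x7F001000 + 0x7FFF) & ~0x7FFF             = 0x7F008000
//   Head trim     = EFI_SIZE_TO_PAGES (0x7F008000 - 0x7F001000) = 7 pages freed
//   Tail trim     = 10 - 2 - 7                                  = 1 page freed
//
// leaving exactly the two requested pages, aligned on a 32 KB boundary.
//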
1322
1323 /**
1324 Perform the remaining tasks.
1325
1326 **/
1327 VOID
1328 PerformRemainingTasks (
1329 VOID
1330 )
1331 {
1332 if (mSmmReadyToLock) {
1333 //
1334 // Start SMM Profile feature
1335 //
1336 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1337 SmmProfileStart ();
1338 }
1339 //
1340 // Create a mix of 2MB and 4KB page tables. Update some memory ranges to be absent and execute-disabled.
1341 //
1342 InitPaging ();
1343
1344 //
1345 // Mark critical region to be read-only in page table
1346 //
1347 SetMemMapAttributes ();
1348
1349 //
1350 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1351 //
1352 SetUefiMemMapAttributes ();
1353
1354 //
1355 // Set page table itself to be read-only
1356 //
1357 SetPageTableAttributes ();
1358
1359 //
1360 // Configure SMM Code Access Check feature if available.
1361 //
1362 ConfigSmmCodeAccessCheck ();
1363
1364 SmmCpuFeaturesCompleteSmmReadyToLock ();
1365
1366 //
1367 // Clean SMM ready to lock flag
1368 //
1369 mSmmReadyToLock = FALSE;
1370 }
1371 }
1372
1373 /**
1374 Perform the pre tasks.
1375
1376 **/
1377 VOID
1378 PerformPreTasks (
1379 VOID
1380 )
1381 {
1382 RestoreSmmConfigurationInS3 ();
1383 }