1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // SMM CPU Private Data structure that contains SMM Configuration Protocol
21 // along with its supporting fields.
22 //
23 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
25 NULL, // SmmCpuHandle
26 NULL, // Pointer to ProcessorInfo array
27 NULL, // Pointer to Operation array
28 NULL, // Pointer to CpuSaveStateSize array
29 NULL, // Pointer to CpuSaveState array
30 { {0} }, // SmmReservedSmramRegion
31 {
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
34 0, // SmmCoreEntryContext.NumberOfCpus
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize
36 NULL // SmmCoreEntryContext.CpuSaveState
37 },
38 NULL, // SmmCoreEntry
39 {
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
42 },
43 };
44
45 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
47 0, // Array Length of SmBase and APIC ID
48 NULL, // Pointer to APIC ID array
49 NULL, // Pointer to SMBASE array
50 0, // Reserved
51 0, // SmrrBase
52 0 // SmrrSize
53 };
54
55 //
56 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
57 //
58 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
59
60 //
61 // SMM Relocation variables
62 //
63 volatile BOOLEAN *mRebased;
64 volatile BOOLEAN mIsBsp;
65
66 ///
67 /// Handle for the SMM CPU Protocol
68 ///
69 EFI_HANDLE mSmmCpuHandle = NULL;
70
71 ///
72 /// SMM CPU Protocol instance
73 ///
74 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
75 SmmReadSaveState,
76 SmmWriteSaveState
77 };
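//
// Illustrative usage sketch (not part of this driver): once mSmmCpu is
// installed, another SMM driver that has located the protocol could read a
// save state register of a given CPU roughly like this, where SmmCpu and
// CpuIndex are hypothetical caller-side variables:
//
//   UINT64      Rax;
//   EFI_STATUS  Status;
//
//   Status = SmmCpu->ReadSaveState (
//                      SmmCpu,
//                      sizeof (Rax),
//                      EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                      CpuIndex,
//                      &Rax
//                      );
//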
78
79 ///
80 /// SMM Memory Attribute Protocol instance
81 ///
82 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
83 EdkiiSmmGetMemoryAttributes,
84 EdkiiSmmSetMemoryAttributes,
85 EdkiiSmmClearMemoryAttributes
86 };
87
88 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
89
90 //
91 // SMM stack information
92 //
93 UINTN mSmmStackArrayBase;
94 UINTN mSmmStackArrayEnd;
95 UINTN mSmmStackSize;
96
97 UINTN mMaxNumberOfCpus = 1;
98 UINTN mNumberOfCpus = 1;
99
100 //
101 // SMM ready to lock flag
102 //
103 BOOLEAN mSmmReadyToLock = FALSE;
104
105 //
106 // Global used to cache PCD for SMM Code Access Check enable
107 //
108 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
109
110 //
111 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
112 //
113 UINT64 mAddressEncMask = 0;
114
115 //
116 // Spin lock used to serialize setting of SMM Code Access Check feature
117 //
118 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
119
120 //
121 // Saved SMM ranges information
122 //
123 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
124 UINTN mSmmCpuSmramRangeCount;
125
126 UINT8 mPhysicalAddressBits;
127
128 //
129 // Control register contents saved for SMM S3 resume state initialization.
130 //
131 UINT32 mSmmCr0;
132 UINT32 mSmmCr4;
133
134 /**
135 Initialize IDT to setup exception handlers for SMM.
136
137 **/
138 VOID
139 InitializeSmmIdt (
140 VOID
141 )
142 {
143 EFI_STATUS Status;
144 BOOLEAN InterruptState;
145 IA32_DESCRIPTOR DxeIdtr;
146
147 //
148 // There are 32 (not 255) entries in it since only processor-generated
149 // exceptions will be handled.
150 //
151 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
152 //
153 // Allocate page aligned IDT, because it might be set as read only.
154 //
155 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
156 ASSERT (gcSmiIdtr.Base != 0);
157 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
158
159 //
160 // Disable Interrupt and save DXE IDT table
161 //
162 InterruptState = SaveAndDisableInterrupts ();
163 AsmReadIdtr (&DxeIdtr);
164 //
165 // Load SMM temporary IDT table
166 //
167 AsmWriteIdtr (&gcSmiIdtr);
168 //
169 // Setup SMM default exception handlers, SMM IDT table
170 // will be updated and saved in gcSmiIdtr
171 //
172 Status = InitializeCpuExceptionHandlers (NULL);
173 ASSERT_EFI_ERROR (Status);
174 //
175 // Restore DXE IDT table and CPU interrupt
176 //
177 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
178 SetInterruptState (InterruptState);
179 }
180
181 /**
182 Search for the module name by the input IP address and output it.
183
184 @param CallerIpAddress Caller instruction pointer.
185
186 **/
187 VOID
188 DumpModuleInfoByIp (
189 IN UINTN CallerIpAddress
190 )
191 {
192 UINTN Pe32Data;
193 VOID *PdbPointer;
194
195 //
196 // Find Image Base
197 //
198 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
199 if (Pe32Data != 0) {
200 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
202 if (PdbPointer != NULL) {
203 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
204 }
205 }
206 }
207
208 /**
209 Read information from the CPU save state.
210
211 @param This EFI_SMM_CPU_PROTOCOL instance
212 @param Width The number of bytes to read from the CPU save state.
213 @param Register Specifies the CPU register to read from the save state.
214 @param CpuIndex Specifies the zero-based index of the CPU save state.
215 @param Buffer Upon return, this holds the CPU register value read from the save state.
216
217 @retval EFI_SUCCESS The register was read from the save state.
218 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor.
219 @retval EFI_INVALID_PARAMETER Buffer is NULL or CpuIndex is out of range.
220
221 **/
222 EFI_STATUS
223 EFIAPI
224 SmmReadSaveState (
225 IN CONST EFI_SMM_CPU_PROTOCOL *This,
226 IN UINTN Width,
227 IN EFI_SMM_SAVE_STATE_REGISTER Register,
228 IN UINTN CpuIndex,
229 OUT VOID *Buffer
230 )
231 {
232 EFI_STATUS Status;
233
234 //
235 // Retrieve pointer to the specified CPU's SMM Save State buffer
236 //
237 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
238 return EFI_INVALID_PARAMETER;
239 }
240
241 //
242 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
243 //
244 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
245 //
246 // The pseudo-register only supports the 64-bit size specified by Width.
247 //
248 if (Width != sizeof (UINT64)) {
249 return EFI_INVALID_PARAMETER;
250 }
251 //
252 // If the processor is in SMM at the time the SMI occurred,
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
254 // Otherwise, EFI_NOT_FOUND is returned.
255 //
256 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
258 return EFI_SUCCESS;
259 } else {
260 return EFI_NOT_FOUND;
261 }
262 }
263
264 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
265 return EFI_INVALID_PARAMETER;
266 }
267
268 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
269 if (Status == EFI_UNSUPPORTED) {
270 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
271 }
272 return Status;
273 }
274
275 /**
276 Write data to the CPU save state.
277
278 @param This EFI_SMM_CPU_PROTOCOL instance
279 @param Width The number of bytes to write to the CPU save state.
280 @param Register Specifies the CPU register to write to the save state.
281 @param CpuIndex Specifies the zero-based index of the CPU save state
282 @param Buffer Upon entry, this holds the new CPU register value.
283
284 @retval EFI_SUCCESS The register was written to the save state.
285 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor.
286 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct.
287
288 **/
289 EFI_STATUS
290 EFIAPI
291 SmmWriteSaveState (
292 IN CONST EFI_SMM_CPU_PROTOCOL *This,
293 IN UINTN Width,
294 IN EFI_SMM_SAVE_STATE_REGISTER Register,
295 IN UINTN CpuIndex,
296 IN CONST VOID *Buffer
297 )
298 {
299 EFI_STATUS Status;
300
301 //
302 // Retrieve pointer to the specified CPU's SMM Save State buffer
303 //
304 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
305 return EFI_INVALID_PARAMETER;
306 }
307
308 //
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
310 //
311 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
312 return EFI_SUCCESS;
313 }
314
315 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
316 return EFI_INVALID_PARAMETER;
317 }
318
319 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
320 if (Status == EFI_UNSUPPORTED) {
321 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
322 }
323 return Status;
324 }
325
326
327 /**
328 C function for the SMI handler, used to change each processor's SMBASE register.
329
330 **/
331 VOID
332 EFIAPI
333 SmmInitHandler (
334 VOID
335 )
336 {
337 UINT32 ApicId;
338 UINTN Index;
339
340 //
341 // Update SMM IDT entries' code segment and load IDT
342 //
343 AsmWriteIdtr (&gcSmiIdtr);
344 ApicId = GetApicId ();
345
346 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
347
348 for (Index = 0; Index < mNumberOfCpus; Index++) {
349 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
350 //
351 // Initialize SMM specific features on the currently executing CPU
352 //
353 SmmCpuFeaturesInitializeProcessor (
354 Index,
355 mIsBsp,
356 gSmmCpuPrivate->ProcessorInfo,
357 &mCpuHotPlugData
358 );
359
360 if (!mSmmS3Flag) {
361 //
362 // Check XD and BTS features on each processor on normal boot
363 //
364 CheckFeatureSupported ();
365 }
366
367 if (mIsBsp) {
368 //
369 // BSP rebase is already done above.
370 // Initialize private data during S3 resume
371 //
372 InitializeMpSyncData ();
373 }
374
375 //
376 // Hook return after RSM to set SMM re-based flag
377 //
378 SemaphoreHook (Index, &mRebased[Index]);
379
380 return;
381 }
382 }
383 ASSERT (FALSE);
384 }
385
386 /**
387 Relocate the SMBASE for each processor.
388
389 Executed on first boot and on all S3 resumes.
390
391 **/
392 VOID
393 EFIAPI
394 SmmRelocateBases (
395 VOID
396 )
397 {
398 UINT8 BakBuf[BACK_BUF_SIZE];
399 SMRAM_SAVE_STATE_MAP BakBuf2;
400 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
401 UINT8 *U8Ptr;
402 UINT32 ApicId;
403 UINTN Index;
404 UINTN BspIndex;
405
406 //
407 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
408 //
409 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
410
411 //
412 // Patch ASM code template with current CR0, CR3, and CR4 values
413 //
414 mSmmCr0 = (UINT32)AsmReadCr0 ();
415 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
416 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
417 mSmmCr4 = (UINT32)AsmReadCr4 ();
418 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4, 4);
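//
// Note: PatchInstructionX86() writes the 32-bit value directly into the
// immediate operand of the instruction located at the corresponding assembly
// patch label (gPatchSmmCr0/gPatchSmmCr3/gPatchSmmCr4 in the SmmInit assembly
// template), so the relocation stub picks up the current control register
// values without reading writable global variables at SMM init time.
//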
419
420 //
421 // Patch GDTR for SMM base relocation
422 //
423 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
424 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
425
426 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
427 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
428
429 //
430 // Backup original contents at address 0x38000
431 //
432 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
433 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
434
435 //
436 // Load image for relocation
437 //
438 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
439
440 //
441 // Retrieve the local APIC ID of current processor
442 //
443 ApicId = GetApicId ();
444
445 //
446 // Relocate SM bases for all APs
447 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
448 //
449 mIsBsp = FALSE;
450 BspIndex = (UINTN)-1;
451 for (Index = 0; Index < mNumberOfCpus; Index++) {
452 mRebased[Index] = FALSE;
453 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
454 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
455 //
456 // Wait for this AP to finish its 1st SMI
457 //
458 while (!mRebased[Index]);
459 } else {
460 //
461 // BSP will be relocated later
462 //
463 BspIndex = Index;
464 }
465 }
466
467 //
468 // Relocate BSP's SMM base
469 //
470 ASSERT (BspIndex != (UINTN)-1);
471 mIsBsp = TRUE;
472 SendSmiIpi (ApicId);
473 //
474 // Wait for the BSP to finish its 1st SMI
475 //
476 while (!mRebased[BspIndex]);
477
478 //
479 // Restore contents at address 0x38000
480 //
481 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
482 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
483 }
484
485 /**
486 SMM Ready To Lock event notification handler.
487
488 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
489 perform additional lock actions that must be performed from SMM on the next SMI.
490
491 @param[in] Protocol Points to the protocol's unique identifier.
492 @param[in] Interface Points to the interface instance.
493 @param[in] Handle The handle on which the interface was installed.
494
495 @retval EFI_SUCCESS Notification handler runs successfully.
496 **/
497 EFI_STATUS
498 EFIAPI
499 SmmReadyToLockEventNotify (
500 IN CONST EFI_GUID *Protocol,
501 IN VOID *Interface,
502 IN EFI_HANDLE Handle
503 )
504 {
505 GetAcpiCpuData ();
506
507 //
508 // Cache a copy of UEFI memory map before we start profiling feature.
509 //
510 GetUefiMemoryMap ();
511
512 //
513 // Set SMM ready to lock flag and return
514 //
515 mSmmReadyToLock = TRUE;
516 return EFI_SUCCESS;
517 }
518
519 /**
520 The module Entry Point of the CPU SMM driver.
521
522 @param ImageHandle The firmware allocated handle for the EFI image.
523 @param SystemTable A pointer to the EFI System Table.
524
525 @retval EFI_SUCCESS The entry point is executed successfully.
526 @retval Other Some error occurs when executing this entry point.
527
528 **/
529 EFI_STATUS
530 EFIAPI
531 PiCpuSmmEntry (
532 IN EFI_HANDLE ImageHandle,
533 IN EFI_SYSTEM_TABLE *SystemTable
534 )
535 {
536 EFI_STATUS Status;
537 EFI_MP_SERVICES_PROTOCOL *MpServices;
538 UINTN NumberOfEnabledProcessors;
539 UINTN Index;
540 VOID *Buffer;
541 UINTN BufferPages;
542 UINTN TileCodeSize;
543 UINTN TileDataSize;
544 UINTN TileSize;
545 UINT8 *Stacks;
546 VOID *Registration;
547 UINT32 RegEax;
548 UINT32 RegEdx;
549 UINTN FamilyId;
550 UINTN ModelId;
551 UINT32 Cr3;
552
553 //
554 // Initialize address fixup
555 //
556 PiSmmCpuSmmInitFixupAddress ();
557 PiSmmCpuSmiEntryFixupAddress ();
558
559 //
560 // Initialize Debug Agent to support source level debug in SMM code
561 //
562 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
563
564 //
565 // Report the start of CPU SMM initialization.
566 //
567 REPORT_STATUS_CODE (
568 EFI_PROGRESS_CODE,
569 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
570 );
571
572 //
573 // Fix segment address of the long-mode-switch jump
574 //
575 if (sizeof (UINTN) == sizeof (UINT64)) {
576 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
577 }
578
579 //
580 // Find out SMRR Base and SMRR Size
581 //
582 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
583
584 //
585 // Get MP Services Protocol
586 //
587 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
588 ASSERT_EFI_ERROR (Status);
589
590 //
591 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
592 //
593 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
594 ASSERT_EFI_ERROR (Status);
595 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
596
597 //
598 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
599 // A constant BSP index makes no sense because the BSP may be hot removed.
600 //
601 DEBUG_CODE (
602 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
603
604 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
605 }
606 );
607
608 //
609 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
610 //
611 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
612 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
613
614 //
615 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
616 // Make sure AddressEncMask is contained in the smallest supported address field.
617 //
618 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
619 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
620
621 //
622 // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors
623 //
624 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
625 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
626 } else {
627 mMaxNumberOfCpus = mNumberOfCpus;
628 }
629 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
630
631 //
632 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
633 // allocated buffer. The minimum size of this buffer for a uniprocessor system
634 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area
635 // is just below SMBASE + 64KB. If more than one CPU is present in the platform,
636 // then the SMI entry point and the CPU save state areas can be tiled to minimize
637 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
638 // by adding the CPU save state size, any extra CPU specific context, and
639 // the size of code that must be placed at the SMI entry point to transfer
640 // control to a C function in the native SMM execution mode. This size is
641 // rounded up to the nearest power of 2 to give the tile size for each CPU.
642 // The total amount of memory required is the maximum number of CPUs that the
643 // platform supports times the tile size. The picture below shows the tiling,
644 // where m is the number of tiles that fit in 32KB.
645 //
646 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
647 // | CPU m+1 Save State |
648 // +-----------------------------+
649 // | CPU m+1 Extra Data |
650 // +-----------------------------+
651 // | Padding |
652 // +-----------------------------+
653 // | CPU 2m SMI Entry |
654 // +#############################+ <-- Base of allocated buffer + 64 KB
655 // | CPU m-1 Save State |
656 // +-----------------------------+
657 // | CPU m-1 Extra Data |
658 // +-----------------------------+
659 // | Padding |
660 // +-----------------------------+
661 // | CPU 2m-1 SMI Entry |
662 // +=============================+ <-- 2^n offset from Base of allocated buffer
663 // | . . . . . . . . . . . . |
664 // +=============================+ <-- 2^n offset from Base of allocated buffer
665 // | CPU 2 Save State |
666 // +-----------------------------+
667 // | CPU 2 Extra Data |
668 // +-----------------------------+
669 // | Padding |
670 // +-----------------------------+
671 // | CPU m+1 SMI Entry |
672 // +=============================+ <-- Base of allocated buffer + 32 KB
673 // | CPU 1 Save State |
674 // +-----------------------------+
675 // | CPU 1 Extra Data |
676 // +-----------------------------+
677 // | Padding |
678 // +-----------------------------+
679 // | CPU m SMI Entry |
680 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
681 // | CPU 0 Save State |
682 // +-----------------------------+
683 // | CPU 0 Extra Data |
684 // +-----------------------------+
685 // | Padding |
686 // +-----------------------------+
687 // | CPU m-1 SMI Entry |
688 // +=============================+ <-- 2^n offset from Base of allocated buffer
689 // | . . . . . . . . . . . . |
690 // +=============================+ <-- 2^n offset from Base of allocated buffer
691 // | Padding |
692 // +-----------------------------+
693 // | CPU 1 SMI Entry |
694 // +=============================+ <-- 2^n offset from Base of allocated buffer
695 // | Padding |
696 // +-----------------------------+
697 // | CPU 0 SMI Entry |
698 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
699 //
700
701 //
702 // Retrieve CPU Family
703 //
704 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
705 FamilyId = (RegEax >> 8) & 0xf;
706 ModelId = (RegEax >> 4) & 0xf;
707 if (FamilyId == 0x06 || FamilyId == 0x0f) {
708 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
709 }
710
711 RegEdx = 0;
712 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
713 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
714 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
715 }
716 //
717 // Determine the mode of the CPU at the time an SMI occurs
718 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
719 // Volume 3C, Section 34.4.1.1
720 //
721 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
722 if ((RegEdx & BIT29) != 0) {
723 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
724 }
725 if (FamilyId == 0x06) {
726 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
727 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
728 }
729 }
730
731 //
732 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
733 // specific context, which starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
734 // This size is rounded up to nearest power of 2.
735 //
736 TileCodeSize = GetSmiHandlerSize ();
737 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
738 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
739 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
740 TileSize = TileDataSize + TileCodeSize - 1;
741 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
742 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
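//
// Worked example (illustrative values only): with TileCodeSize = 0x1000 and
// TileDataSize = 0x3000, TileDataSize + TileCodeSize - 1 = 0x3FFF,
// GetPowerOfTwo32 (0x3FFF) = 0x2000, so TileSize = 2 * 0x2000 = 0x4000.
// A combined size of 0x4001 bytes would instead round up to 0x8000.
//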
743
744 //
745 // If the TileSize is larger than space available for the SMI Handler of
746 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
747 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
748 // the SMI Handler size must be reduced or the size of the extra CPU specific
749 // context must be reduced.
750 //
751 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
752
753 //
754 // Allocate buffer for all of the tiles.
755 //
756 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
757 // Volume 3C, Section 34.11 SMBASE Relocation
758 // For Pentium and Intel486 processors, the SMBASE values must be
759 // aligned on a 32-KByte boundary or the processor will enter shutdown
760 // state during the execution of a RSM instruction.
761 //
762 // Intel486 processors: FamilyId is 4
763 // Pentium processors : FamilyId is 5
764 //
765 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
766 if ((FamilyId == 4) || (FamilyId == 5)) {
767 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
768 } else {
769 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
770 }
771 ASSERT (Buffer != NULL);
772 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
773
774 //
775 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
776 //
777 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
778 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
779
780 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
781 ASSERT (gSmmCpuPrivate->Operation != NULL);
782
783 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
784 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
785
786 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
787 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
788
789 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
790 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
791
792 //
793 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
794 //
795 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
796 ASSERT (mCpuHotPlugData.ApicId != NULL);
797 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
798 ASSERT (mCpuHotPlugData.SmBase != NULL);
799 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
800
801 //
802 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
803 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
804 // size for each CPU in the platform
805 //
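//
// Note: SMM_HANDLER_OFFSET is the architectural SMI entry offset from SMBASE
// (0x8000), so SmBase[Index] + SMM_HANDLER_OFFSET points at the start of tile
// Index inside Buffer, and SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET points
// at that CPU's save state area.
//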
806 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
807 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
808 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
809 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
810 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
811
812 if (Index < mNumberOfCpus) {
813 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
814 ASSERT_EFI_ERROR (Status);
815 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
816
817 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
818 Index,
819 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
820 mCpuHotPlugData.SmBase[Index],
821 gSmmCpuPrivate->CpuSaveState[Index],
822 gSmmCpuPrivate->CpuSaveStateSize[Index]
823 ));
824 } else {
825 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
826 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
827 }
828 }
829
830 //
831 // Allocate SMI stacks for all processors.
832 //
833 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
834 //
835 // Two more pages are allocated for each processor:
836 // one is the guard page and the other is the known good stack.
837 //
838 // +-------------------------------------------+-----+-------------------------------------------+
839 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
840 // +-------------------------------------------+-----+-------------------------------------------+
841 // | | | |
842 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
843 //
844 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
845 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
846 ASSERT (Stacks != NULL);
847 mSmmStackArrayBase = (UINTN)Stacks;
848 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
849 } else {
850 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
851 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
852 ASSERT (Stacks != NULL);
853 }
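//
// Example (illustrative values): with PcdCpuSmmStackSize = 0x2000 and
// PcdCpuSmmStackGuard enabled, each processor gets the 2 stack pages plus a
// guard page and a known good stack page, so mSmmStackSize is 16 KB and the
// Stacks array holds 16 KB per processor.
//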
854
855 //
856 // Set SMI stack for SMM base relocation
857 //
858 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
859
860 //
861 // Initialize IDT
862 //
863 InitializeSmmIdt ();
864
865 //
866 // Relocate SMM Base addresses to the ones allocated from SMRAM
867 //
868 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
869 ASSERT (mRebased != NULL);
870 SmmRelocateBases ();
871
872 //
873 // Call hook for BSP to perform extra actions in normal mode after all
874 // SMM base addresses have been relocated on all CPUs
875 //
876 SmmCpuFeaturesSmmRelocationComplete ();
877
878 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
879
880 //
881 // SMM Time initialization
882 //
883 InitializeSmmTimer ();
884
885 //
886 // Initialize MP globals
887 //
888 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
889
890 //
891 // Fill in SMM Reserved Regions
892 //
893 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
894 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
895
896 //
897 // Install the SMM Configuration Protocol onto a new handle in the handle database.
898 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
899 // to an SMRAM address will be present in the handle database.
900 //
901 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
902 &gSmmCpuPrivate->SmmCpuHandle,
903 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
904 NULL
905 );
906 ASSERT_EFI_ERROR (Status);
907
908 //
909 // Install the SMM CPU Protocol into SMM protocol database
910 //
911 Status = gSmst->SmmInstallProtocolInterface (
912 &mSmmCpuHandle,
913 &gEfiSmmCpuProtocolGuid,
914 EFI_NATIVE_INTERFACE,
915 &mSmmCpu
916 );
917 ASSERT_EFI_ERROR (Status);
918
919 //
920 // Install the SMM Memory Attribute Protocol into SMM protocol database
921 //
922 Status = gSmst->SmmInstallProtocolInterface (
923 &mSmmCpuHandle,
924 &gEdkiiSmmMemoryAttributeProtocolGuid,
925 EFI_NATIVE_INTERFACE,
926 &mSmmMemoryAttribute
927 );
928 ASSERT_EFI_ERROR (Status);
929
930 //
931 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
932 //
933 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
934 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
935 ASSERT_EFI_ERROR (Status);
936 }
937
938 //
939 // Initialize SMM CPU Services Support
940 //
941 Status = InitializeSmmCpuServices (mSmmCpuHandle);
942 ASSERT_EFI_ERROR (Status);
943
944 //
945 // Register SMM Ready To Lock Protocol notification
946 //
947 Status = gSmst->SmmRegisterProtocolNotify (
948 &gEfiSmmReadyToLockProtocolGuid,
949 SmmReadyToLockEventNotify,
950 &Registration
951 );
952 ASSERT_EFI_ERROR (Status);
953
954 //
955 // Initialize SMM Profile feature
956 //
957 InitSmmProfile (Cr3);
958
959 GetAcpiS3EnableFlag ();
960 InitSmmS3ResumeState (Cr3);
961
962 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
963
964 return EFI_SUCCESS;
965 }
966
967 /**
968
969 Find out SMRAM information including SMRR base and SMRR size.
970
971 @param SmrrBase SMRR base
972 @param SmrrSize SMRR size
973
974 **/
975 VOID
976 FindSmramInfo (
977 OUT UINT32 *SmrrBase,
978 OUT UINT32 *SmrrSize
979 )
980 {
981 EFI_STATUS Status;
982 UINTN Size;
983 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
984 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
985 UINTN Index;
986 UINT64 MaxSize;
987 BOOLEAN Found;
988
989 //
990 // Get SMM Access Protocol
991 //
992 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
993 ASSERT_EFI_ERROR (Status);
994
995 //
996 // Get SMRAM information
997 //
998 Size = 0;
999 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1000 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1001
1002 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1003 ASSERT (mSmmCpuSmramRanges != NULL);
1004
1005 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
1006 ASSERT_EFI_ERROR (Status);
1007
1008 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1009
1010 //
1011 // Find the largest SMRAM range between 1MB and 4GB that is at least 256KB - 4KB in size
1012 //
1013 CurrentSmramRange = NULL;
1014 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1015 //
1016 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1017 //
1018 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1019 continue;
1020 }
1021
1022 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1023 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1024 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1025 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1026 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1027 }
1028 }
1029 }
1030 }
1031
1032 ASSERT (CurrentSmramRange != NULL);
1033
1034 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1035 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1036
1037 do {
1038 Found = FALSE;
1039 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1040 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
1041 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
1042 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1043 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1044 Found = TRUE;
1045 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
1046 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1047 Found = TRUE;
1048 }
1049 }
1050 } while (Found);
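//
// At this point [*SmrrBase, *SmrrBase + *SmrrSize) has been extended to the
// maximal contiguous run of SMRAM descriptors around the range selected above:
// each pass of the loop merges any descriptor that ends exactly at *SmrrBase
// or starts exactly at *SmrrBase + *SmrrSize, and the loop stops once no such
// neighbor remains.
//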
1051
1052 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1053 }
1054
1055 /**
1056 Configure SMM Code Access Check feature on an AP.
1057 SMM Feature Control MSR will be locked after configuration.
1058
1059 @param[in,out] Buffer Pointer to private data buffer.
1060 **/
1061 VOID
1062 EFIAPI
1063 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1064 IN OUT VOID *Buffer
1065 )
1066 {
1067 UINTN CpuIndex;
1068 UINT64 SmmFeatureControlMsr;
1069 UINT64 NewSmmFeatureControlMsr;
1070
1071 //
1072 // Retrieve the CPU Index from the context passed in
1073 //
1074 CpuIndex = *(UINTN *)Buffer;
1075
1076 //
1077 // Get the current SMM Feature Control MSR value
1078 //
1079 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1080
1081 //
1082 // Compute the new SMM Feature Control MSR value
1083 //
1084 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1085 if (mSmmCodeAccessCheckEnable) {
1086 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1087 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1088 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1089 }
1090 }
1091
1092 //
1093 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1094 //
1095 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1096 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1097 }
1098
1099 //
1100 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1101 //
1102 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1103 }
1104
1105 /**
1106 Configure SMM Code Access Check feature for all processors.
1107 SMM Feature Control MSR will be locked after configuration.
1108 **/
1109 VOID
1110 ConfigSmmCodeAccessCheck (
1111 VOID
1112 )
1113 {
1114 UINTN Index;
1115 EFI_STATUS Status;
1116
1117 //
1118 // Check to see if the Feature Control MSR is supported on this CPU
1119 //
1120 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1121 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1122 mSmmCodeAccessCheckEnable = FALSE;
1123 return;
1124 }
1125
1126 //
1127 // Check to see if the CPU supports the SMM Code Access Check feature
1128 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1129 //
1130 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1131 mSmmCodeAccessCheckEnable = FALSE;
1132 return;
1133 }
1134
1135 //
1136 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1137 //
1138 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1139
1140 //
1141 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1142 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1143 //
1144 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1145
1146 //
1147 // Enable SMM Code Access Check feature on the BSP.
1148 //
1149 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1150
1151 //
1152 // Enable SMM Code Access Check feature for the APs.
1153 //
1154 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1155 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1156 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1157 //
1158 // If this processor does not exist
1159 //
1160 continue;
1161 }
1162 //
1163 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1164 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1165 //
1166 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1167
1168 //
1169 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1170 //
1171 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1172 ASSERT_EFI_ERROR (Status);
1173
1174 //
1175 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1176 //
1177 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1178 CpuPause ();
1179 }
1180
1181 //
1182 // Release the Config SMM Code Access Check spin lock.
1183 //
1184 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1185 }
1186 }
1187 }
1188
1189 /**
1190 This API provides a way to allocate memory for page tables.
1191
1192 This API can be called more than once to allocate memory for page tables.
1193
1194 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1195 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1196 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1197 returned.
1198
1199 @param Pages The number of 4 KB pages to allocate.
1200
1201 @return A pointer to the allocated buffer or NULL if allocation fails.
1202
1203 **/
1204 VOID *
1205 AllocatePageTableMemory (
1206 IN UINTN Pages
1207 )
1208 {
1209 VOID *Buffer;
1210
1211 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1212 if (Buffer != NULL) {
1213 return Buffer;
1214 }
1215 return AllocatePages (Pages);
1216 }
1217
1218 /**
1219 Allocate pages for code.
1220
1221 @param[in] Pages Number of pages to be allocated.
1222
1223 @return Allocated memory.
1224 **/
1225 VOID *
1226 AllocateCodePages (
1227 IN UINTN Pages
1228 )
1229 {
1230 EFI_STATUS Status;
1231 EFI_PHYSICAL_ADDRESS Memory;
1232
1233 if (Pages == 0) {
1234 return NULL;
1235 }
1236
1237 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1238 if (EFI_ERROR (Status)) {
1239 return NULL;
1240 }
1241 return (VOID *) (UINTN) Memory;
1242 }
1243
1244 /**
1245 Allocate aligned pages for code.
1246
1247 @param[in] Pages Number of pages to be allocated.
1248 @param[in] Alignment The requested alignment of the allocation.
1249 Must be a power of two.
1250 If Alignment is zero, then byte alignment is used.
1251
1252 @return Allocated memory.
1253 **/
1254 VOID *
1255 AllocateAlignedCodePages (
1256 IN UINTN Pages,
1257 IN UINTN Alignment
1258 )
1259 {
1260 EFI_STATUS Status;
1261 EFI_PHYSICAL_ADDRESS Memory;
1262 UINTN AlignedMemory;
1263 UINTN AlignmentMask;
1264 UINTN UnalignedPages;
1265 UINTN RealPages;
1266
1267 //
1268 // Alignment must be a power of two or zero.
1269 //
1270 ASSERT ((Alignment & (Alignment - 1)) == 0);
1271
1272 if (Pages == 0) {
1273 return NULL;
1274 }
1275 if (Alignment > EFI_PAGE_SIZE) {
1276 //
1277 // Calculate the total number of pages since alignment is larger than page size.
1278 //
1279 AlignmentMask = Alignment - 1;
1280 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1281 //
1282 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1283 //
1284 ASSERT (RealPages > Pages);
1285
1286 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1287 if (EFI_ERROR (Status)) {
1288 return NULL;
1289 }
1290 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1291 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1292 if (UnalignedPages > 0) {
1293 //
1294 // Free first unaligned page(s).
1295 //
1296 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1297 ASSERT_EFI_ERROR (Status);
1298 }
1299 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1300 UnalignedPages = RealPages - Pages - UnalignedPages;
1301 if (UnalignedPages > 0) {
1302 //
1303 // Free last unaligned page(s).
1304 //
1305 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1306 ASSERT_EFI_ERROR (Status);
1307 }
1308 } else {
1309 //
1310 // Do not over-allocate pages in this case.
1311 //
1312 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1313 if (EFI_ERROR (Status)) {
1314 return NULL;
1315 }
1316 AlignedMemory = (UINTN) Memory;
1317 }
1318 return (VOID *) AlignedMemory;
1319 }
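//
// Worked example (illustrative values): requesting 8 pages aligned to SIZE_32KB
// over-allocates 8 + EFI_SIZE_TO_PAGES (SIZE_32KB) = 16 pages, rounds the
// returned address up to the next 32 KB boundary, and then frees the unaligned
// head and tail pages so that exactly 8 aligned pages remain allocated.
//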
1320
1321 /**
1322 Perform the remaining tasks.
1323
1324 **/
1325 VOID
1326 PerformRemainingTasks (
1327 VOID
1328 )
1329 {
1330 if (mSmmReadyToLock) {
1331 //
1332 // Start SMM Profile feature
1333 //
1334 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1335 SmmProfileStart ();
1336 }
1337 //
1338 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not present and execute-disable.
1339 //
1340 InitPaging ();
1341
1342 //
1343 // Mark critical regions as read-only in the page table
1344 //
1345 SetMemMapAttributes ();
1346
1347 //
1348 // For memory outside SMRAM, only the SMM communication buffer and MMIO ranges are mapped.
1349 //
1350 SetUefiMemMapAttributes ();
1351
1352 //
1353 // Set page table itself to be read-only
1354 //
1355 SetPageTableAttributes ();
1356
1357 //
1358 // Configure SMM Code Access Check feature if available.
1359 //
1360 ConfigSmmCodeAccessCheck ();
1361
1362 SmmCpuFeaturesCompleteSmmReadyToLock ();
1363
1364 //
1365 // Clear the SMM ready to lock flag
1366 //
1367 mSmmReadyToLock = FALSE;
1368 }
1369 }
1370
1371 /**
1372 Perform the pre-tasks.
1373
1374 **/
1375 VOID
1376 PerformPreTasks (
1377 VOID
1378 )
1379 {
1380 RestoreSmmConfigurationInS3 ();
1381 }