]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg: Update PiSmmCpuDxeSmm pass XCODE5 tool chain
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    // SmmConfiguration.SmramReservedRegions points back into this same
    // structure, so the two stay in sync by construction.
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

//
// CPU Hot Plug Data shared with SmmCpuFeaturesLib; SmrrBase/SmrrSize and the
// ApicId/SmBase arrays are filled in by PiCpuSmmEntry() before relocation.
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// mRebased[i] is set by the relocation SMI handler on CPU i (via
// SemaphoreHook) so SmmRelocateBases() can spin-wait for completion;
// volatile because it is written from SMM context and polled outside it.
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu  = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute  = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER   mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN mSmmStackArrayBase;
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN                  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64                   mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK                *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR     *mSmmCpuSmramRanges;
UINTN                    mSmmCpuSmramRangeCount;

UINT8                    mPhysicalAddressBits;
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned SMM IDT (gcSmiIdtr), temporarily loads it while
  InitializeCpuExceptionHandlers() populates the gates, then restores the
  DXE IDT and interrupt state.  Runs in normal (non-SMM) mode during driver
  entry; interrupts are disabled around the temporary IDT switch.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS               Status;
  BOOLEAN                  InterruptState;
  IA32_DESCRIPTOR          DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}
174
175 /**
176 Search module name by input IP address and output it.
177
178 @param CallerIpAddress Caller instruction pointer.
179
180 **/
181 VOID
182 DumpModuleInfoByIp (
183 IN UINTN CallerIpAddress
184 )
185 {
186 UINTN Pe32Data;
187 VOID *PdbPointer;
188
189 //
190 // Find Image Base
191 //
192 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
193 if (Pe32Data != 0) {
194 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
195 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
196 if (PdbPointer != NULL) {
197 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
198 }
199 }
200 }
201
202 /**
203 Read information from the CPU save state.
204
205 @param This EFI_SMM_CPU_PROTOCOL instance
206 @param Width The number of bytes to read from the CPU save state.
207 @param Register Specifies the CPU register to read form the save state.
208 @param CpuIndex Specifies the zero-based index of the CPU save state.
209 @param Buffer Upon return, this holds the CPU register value read from the save state.
210
211 @retval EFI_SUCCESS The register was read from Save State
212 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
213 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.
214
215 **/
216 EFI_STATUS
217 EFIAPI
218 SmmReadSaveState (
219 IN CONST EFI_SMM_CPU_PROTOCOL *This,
220 IN UINTN Width,
221 IN EFI_SMM_SAVE_STATE_REGISTER Register,
222 IN UINTN CpuIndex,
223 OUT VOID *Buffer
224 )
225 {
226 EFI_STATUS Status;
227
228 //
229 // Retrieve pointer to the specified CPU's SMM Save State buffer
230 //
231 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
232 return EFI_INVALID_PARAMETER;
233 }
234
235 //
236 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
237 //
238 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
239 //
240 // The pseudo-register only supports the 64-bit size specified by Width.
241 //
242 if (Width != sizeof (UINT64)) {
243 return EFI_INVALID_PARAMETER;
244 }
245 //
246 // If the processor is in SMM at the time the SMI occurred,
247 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
248 // Otherwise, EFI_NOT_FOUND is returned.
249 //
250 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
251 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
252 return EFI_SUCCESS;
253 } else {
254 return EFI_NOT_FOUND;
255 }
256 }
257
258 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
259 return EFI_INVALID_PARAMETER;
260 }
261
262 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
263 if (Status == EFI_UNSUPPORTED) {
264 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
265 }
266 return Status;
267 }
268
269 /**
270 Write data to the CPU save state.
271
272 @param This EFI_SMM_CPU_PROTOCOL instance
273 @param Width The number of bytes to read from the CPU save state.
274 @param Register Specifies the CPU register to write to the save state.
275 @param CpuIndex Specifies the zero-based index of the CPU save state
276 @param Buffer Upon entry, this holds the new CPU register value.
277
278 @retval EFI_SUCCESS The register was written from Save State
279 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
280 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct
281
282 **/
283 EFI_STATUS
284 EFIAPI
285 SmmWriteSaveState (
286 IN CONST EFI_SMM_CPU_PROTOCOL *This,
287 IN UINTN Width,
288 IN EFI_SMM_SAVE_STATE_REGISTER Register,
289 IN UINTN CpuIndex,
290 IN CONST VOID *Buffer
291 )
292 {
293 EFI_STATUS Status;
294
295 //
296 // Retrieve pointer to the specified CPU's SMM Save State buffer
297 //
298 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
299 return EFI_INVALID_PARAMETER;
300 }
301
302 //
303 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
304 //
305 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
306 return EFI_SUCCESS;
307 }
308
309 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
310 return EFI_INVALID_PARAMETER;
311 }
312
313 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
314 if (Status == EFI_UNSUPPORTED) {
315 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
316 }
317 return Status;
318 }
319
320
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Invoked from gcSmmInitTemplate during the first SMI on each processor
  (triggered by SmmRelocateBases).  Identifies the current CPU by APIC ID,
  performs CPU-specific SMM init (which rebases SMBASE via mCpuHotPlugData),
  and hooks the RSM return so mRebased[Index] is set when relocation is done.
  mIsBsp is set by SmmRelocateBases before each SMI is sent.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32             ApicId;
  UINTN              Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Locate this CPU's logical index by matching its APIC ID.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }
  //
  // An SMI from a CPU whose APIC ID is not in the processor table is fatal.
  //
  ASSERT (FALSE);
}
379
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Temporarily installs gcSmmInitTemplate at the default SMBASE handler
  address (0x38000), then sends an SMI to every AP (and finally the BSP)
  so SmmInitHandler runs and rebases each CPU's SMBASE into SMRAM.
  The original contents at the default SMBASE are saved and restored.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
476
/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
 **/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Copy the CPU S3 boot script data into SMRAM before lock-down.
  //
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}
510
/**
  The module Entry Point of the CPU SMM driver.

  Discovers SMRAM and the processor topology, allocates and tiles the
  per-CPU SMBASE/save-state/SMI-entry buffer, relocates every CPU's
  SMBASE into SMRAM, then installs the SMM Configuration, SMM CPU and
  SMM Memory Attribute protocols and registers for Ready-To-Lock.
  The sequence below is strictly ordered; do not reorder steps.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                 Status;
  EFI_MP_SERVICES_PROTOCOL   *MpServices;
  UINTN                      NumberOfEnabledProcessors;
  UINTN                      Index;
  VOID                       *Buffer;
  UINTN                      BufferPages;
  UINTN                      TileCodeSize;
  UINTN                      TileDataSize;
  UINTN                      TileSize;
  UINT8                      *Stacks;
  VOID                       *Registration;
  UINT32                     RegEax;
  UINT32                     RegEdx;
  UINTN                      FamilyId;
  UINTN                      ModelId;
  UINT32                     Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix segment address of the long-mode-switch jump
  // (only needed when this driver is built for X64)
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
  // just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiles to minimize
  // the total amount SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for a each CPU.
  // The total amount of memory required is the maximum number of CPUs that
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  //  +-----------------------------+  <-- 2^n offset from Base of allocated buffer
  //  |   CPU m+1 Save State        |
  //  +-----------------------------+
  //  |   CPU m+1 Extra Data        |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 2m SMI Entry          |
  //  +#############################+  <-- Base of allocated buffer + 64 KB
  //  |   CPU m-1 Save State        |
  //  +-----------------------------+
  //  |   CPU m-1 Extra Data        |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 2m-1 SMI Entry        |
  //  +=============================+  <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+  <-- 2^n offset from Base of allocated buffer
  //  |   CPU 2 Save State          |
  //  +-----------------------------+
  //  |   CPU 2 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m+1 SMI Entry         |
  //  +=============================+  <-- Base of allocated buffer + 32 KB
  //  |   CPU 1 Save State          |
  //  +-----------------------------+
  //  |   CPU 1 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m SMI Entry           |
  //  +#############################+  <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  //  |   CPU 0 Save State          |
  //  +-----------------------------+
  //  |   CPU 0 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m-1 SMI Entry         |
  //  +=============================+  <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+  <-- 2^n offset from Base of allocated buffer
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 1 SMI Entry           |
  //  +=============================+  <-- 2^n offset from Base of allocated buffer
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 0 SMI Entry           |
  //  +#############################+  <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    //
    // Extended model ID only applies to family 6 and family 0xF.
    //
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in  SMM_CPU_PRIVATE_DATA.
  // Arrays are sized for mMaxNumberOfCpus so hot-added CPUs have slots.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index]          = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      //
      // Slots beyond the currently-present CPUs are reserved for hot add;
      // mark them with INVALID_APIC_ID so SmmInitHandler never matches them.
      //
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // 2 more pages is allocated for each processor.
    // one is guard page and the other is known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

  //
  // Set SMI stack for SMM base relocation
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiSmmCpuProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmCpu
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEdkiiSmmMemoryAttributeProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMemoryAttribute
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}
958
/**

  Find out SMRAM information including SMRR base and SMRR size.

  Queries EFI_SMM_ACCESS2_PROTOCOL for all SMRAM descriptors, picks the
  largest usable range between 1MB and SMRR_MAX_ADDRESS, and then grows the
  selected window by absorbing any descriptors that border it exactly, so
  [*SmrrBase, *SmrrBase + *SmrrSize) covers as much contiguous SMRAM as
  possible.  Also populates mSmmCpuSmramRanges/mSmmCpuSmramRangeCount.

  @param SmrrBase  SMRR base
  @param SmrrSize  SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information: first call with Size 0 to learn the required
  // buffer size (must fail with EFI_BUFFER_TOO_SMALL), then fetch for real.
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  // (MaxSize starts at SIZE_256KB - EFI_PAGE_SIZE so smaller ranges are rejected).
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Repeatedly sweep all descriptors, merging any range that is exactly
  // adjacent to the current window (below or above), until one full pass
  // absorbs nothing.  Multiple passes are needed because a merge can make
  // an earlier-indexed range newly adjacent.
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
          *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
        //
        // Range ends exactly at the current base: extend the window downward.
        //
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
        //
        // Range starts exactly at the current end: extend the window upward.
        //
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
1046
1047 /**
1048 Configure SMM Code Access Check feature on an AP.
1049 SMM Feature Control MSR will be locked after configuration.
1050
1051 @param[in,out] Buffer Pointer to private data buffer.
1052 **/
1053 VOID
1054 EFIAPI
1055 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1056 IN OUT VOID *Buffer
1057 )
1058 {
1059 UINTN CpuIndex;
1060 UINT64 SmmFeatureControlMsr;
1061 UINT64 NewSmmFeatureControlMsr;
1062
1063 //
1064 // Retrieve the CPU Index from the context passed in
1065 //
1066 CpuIndex = *(UINTN *)Buffer;
1067
1068 //
1069 // Get the current SMM Feature Control MSR value
1070 //
1071 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1072
1073 //
1074 // Compute the new SMM Feature Control MSR value
1075 //
1076 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1077 if (mSmmCodeAccessCheckEnable) {
1078 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1079 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1080 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1081 }
1082 }
1083
1084 //
1085 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1086 //
1087 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1088 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1089 }
1090
1091 //
1092 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1093 //
1094 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1095 }
1096
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  If the SMM Feature Control register or the SMM Code Access Check capability
  is not supported, mSmmCodeAccessCheckEnable is cleared and nothing is
  programmed.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP (Index still holds the
  // BSP's CPU index here).
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Note: Index doubles as the loop counter and as the CPU-index buffer
      // handed to the AP.  This is safe because the BSP does not advance the
      // loop until the AP signals completion by releasing the spin lock.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock so the next AP's
      // handshake starts from a free lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1180
1181 /**
1182 This API provides a way to allocate memory for page table.
1183
1184 This API can be called more once to allocate memory for page tables.
1185
1186 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1187 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1188 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1189 returned.
1190
1191 @param Pages The number of 4 KB pages to allocate.
1192
1193 @return A pointer to the allocated buffer or NULL if allocation fails.
1194
1195 **/
1196 VOID *
1197 AllocatePageTableMemory (
1198 IN UINTN Pages
1199 )
1200 {
1201 VOID *Buffer;
1202
1203 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1204 if (Buffer != NULL) {
1205 return Buffer;
1206 }
1207 return AllocatePages (Pages);
1208 }
1209
1210 /**
1211 Allocate pages for code.
1212
1213 @param[in] Pages Number of pages to be allocated.
1214
1215 @return Allocated memory.
1216 **/
1217 VOID *
1218 AllocateCodePages (
1219 IN UINTN Pages
1220 )
1221 {
1222 EFI_STATUS Status;
1223 EFI_PHYSICAL_ADDRESS Memory;
1224
1225 if (Pages == 0) {
1226 return NULL;
1227 }
1228
1229 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1230 if (EFI_ERROR (Status)) {
1231 return NULL;
1232 }
1233 return (VOID *) (UINTN) Memory;
1234 }
1235
1236 /**
1237 Allocate aligned pages for code.
1238
1239 @param[in] Pages Number of pages to be allocated.
1240 @param[in] Alignment The requested alignment of the allocation.
1241 Must be a power of two.
1242 If Alignment is zero, then byte alignment is used.
1243
1244 @return Allocated memory.
1245 **/
1246 VOID *
1247 AllocateAlignedCodePages (
1248 IN UINTN Pages,
1249 IN UINTN Alignment
1250 )
1251 {
1252 EFI_STATUS Status;
1253 EFI_PHYSICAL_ADDRESS Memory;
1254 UINTN AlignedMemory;
1255 UINTN AlignmentMask;
1256 UINTN UnalignedPages;
1257 UINTN RealPages;
1258
1259 //
1260 // Alignment must be a power of two or zero.
1261 //
1262 ASSERT ((Alignment & (Alignment - 1)) == 0);
1263
1264 if (Pages == 0) {
1265 return NULL;
1266 }
1267 if (Alignment > EFI_PAGE_SIZE) {
1268 //
1269 // Calculate the total number of pages since alignment is larger than page size.
1270 //
1271 AlignmentMask = Alignment - 1;
1272 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1273 //
1274 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1275 //
1276 ASSERT (RealPages > Pages);
1277
1278 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1279 if (EFI_ERROR (Status)) {
1280 return NULL;
1281 }
1282 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1283 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1284 if (UnalignedPages > 0) {
1285 //
1286 // Free first unaligned page(s).
1287 //
1288 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1289 ASSERT_EFI_ERROR (Status);
1290 }
1291 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1292 UnalignedPages = RealPages - Pages - UnalignedPages;
1293 if (UnalignedPages > 0) {
1294 //
1295 // Free last unaligned page(s).
1296 //
1297 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1298 ASSERT_EFI_ERROR (Status);
1299 }
1300 } else {
1301 //
1302 // Do not over-allocate pages in this case.
1303 //
1304 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1305 if (EFI_ERROR (Status)) {
1306 return NULL;
1307 }
1308 AlignedMemory = (UINTN) Memory;
1309 }
1310 return (VOID *) AlignedMemory;
1311 }
1312
1313 /**
1314 Perform the remaining tasks.
1315
1316 **/
1317 VOID
1318 PerformRemainingTasks (
1319 VOID
1320 )
1321 {
1322 if (mSmmReadyToLock) {
1323 //
1324 // Start SMM Profile feature
1325 //
1326 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1327 SmmProfileStart ();
1328 }
1329 //
1330 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
1331 //
1332 InitPaging ();
1333
1334 //
1335 // Mark critical region to be read-only in page table
1336 //
1337 SetMemMapAttributes ();
1338
1339 //
1340 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1341 //
1342 SetUefiMemMapAttributes ();
1343
1344 //
1345 // Set page table itself to be read-only
1346 //
1347 SetPageTableAttributes ();
1348
1349 //
1350 // Configure SMM Code Access Check feature if available.
1351 //
1352 ConfigSmmCodeAccessCheck ();
1353
1354 SmmCpuFeaturesCompleteSmmReadyToLock ();
1355
1356 //
1357 // Clean SMM ready to lock flag
1358 //
1359 mSmmReadyToLock = FALSE;
1360 }
1361 }
1362
/**
  Perform the pre tasks.

  NOTE(review): the only pre task today is RestoreSmmConfigurationInS3(),
  defined elsewhere in this module; presumably it re-applies the SMM
  configuration on the ACPI S3 resume path — confirm against its definition.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}