UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // SMM CPU Private Data structure that contains SMM Configuration Protocol
21 // along with its supporting fields.
22 //
23 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
25 NULL, // SmmCpuHandle
26 NULL, // Pointer to ProcessorInfo array
27 NULL, // Pointer to Operation array
28 NULL, // Pointer to CpuSaveStateSize array
29 NULL, // Pointer to CpuSaveState array
30 { {0} }, // SmmReservedSmramRegion
31 {
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
34 0, // SmmCoreEntryContext.NumberOfCpus
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize
36 NULL // SmmCoreEntryContext.CpuSaveState
37 },
38 NULL, // SmmCoreEntry
39 {
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
42 },
43 };
44
45 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
47 0, // Array Length of SmBase and APIC ID
48 NULL, // Pointer to APIC ID array
49 NULL, // Pointer to SMBASE array
50 0, // Reserved
51 0, // SmrrBase
52 0 // SmrrSize
53 };
54
55 //
56 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
57 //
58 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
59
60 //
61 // SMM Relocation variables
62 //
63 volatile BOOLEAN *mRebased;
64 volatile BOOLEAN mIsBsp;
65
66 ///
67 /// Handle for the SMM CPU Protocol
68 ///
69 EFI_HANDLE mSmmCpuHandle = NULL;
70
71 ///
72 /// SMM CPU Protocol instance
73 ///
74 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
75 SmmReadSaveState,
76 SmmWriteSaveState
77 };
78
79 ///
80 /// SMM Memory Attribute Protocol instance
81 ///
82 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
83 EdkiiSmmGetMemoryAttributes,
84 EdkiiSmmSetMemoryAttributes,
85 EdkiiSmmClearMemoryAttributes
86 };
87
88 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
89
90 //
91 // SMM stack information
92 //
93 UINTN mSmmStackArrayBase;
94 UINTN mSmmStackArrayEnd;
95 UINTN mSmmStackSize;
96
97 UINTN mMaxNumberOfCpus = 1;
98 UINTN mNumberOfCpus = 1;
99
100 //
101 // SMM ready to lock flag
102 //
103 BOOLEAN mSmmReadyToLock = FALSE;
104
105 //
106 // Global used to cache PCD for SMM Code Access Check enable
107 //
108 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
109
110 //
111 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
112 //
113 UINT64 mAddressEncMask = 0;
114
115 //
116 // Spin lock used to serialize setting of SMM Code Access Check feature
117 //
118 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
119
120 //
121 // Saved SMM ranges information
122 //
123 EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
124 UINTN mSmmCpuSmramRangeCount;
125
126 UINT8 mPhysicalAddressBits;
127
128 /**
129 Initialize the IDT to set up exception handlers for SMM.
130
131 **/
132 VOID
133 InitializeSmmIdt (
134 VOID
135 )
136 {
137 EFI_STATUS Status;
138 BOOLEAN InterruptState;
139 IA32_DESCRIPTOR DxeIdtr;
140
141 //
142 // There are 32 (not 256) entries in it since only processor-generated
143 // exceptions will be handled.
144 //
145 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
146 //
147 // Allocate page aligned IDT, because it might be set as read only.
148 //
149 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
150 ASSERT (gcSmiIdtr.Base != 0);
151 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
152
153 //
154 // Disable Interrupt and save DXE IDT table
155 //
156 InterruptState = SaveAndDisableInterrupts ();
157 AsmReadIdtr (&DxeIdtr);
158 //
159 // Load SMM temporary IDT table
160 //
161 AsmWriteIdtr (&gcSmiIdtr);
162 //
163 // Set up the default SMM exception handlers; the SMM IDT table
164 // will be updated and saved in gcSmiIdtr
165 //
166 Status = InitializeCpuExceptionHandlers (NULL);
167 ASSERT_EFI_ERROR (Status);
168 //
169 // Restore DXE IDT table and CPU interrupt
170 //
171 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
172 SetInterruptState (InterruptState);
173 }
174
175 /**
176 Find the module that contains the input IP address and print its name.
177
178 @param CallerIpAddress Caller instruction pointer.
179
180 **/
181 VOID
182 DumpModuleInfoByIp (
183 IN UINTN CallerIpAddress
184 )
185 {
186 UINTN Pe32Data;
187 VOID *PdbPointer;
188
189 //
190 // Find Image Base
191 //
192 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
193 if (Pe32Data != 0) {
194 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
195 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
196 if (PdbPointer != NULL) {
197 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
198 }
199 }
200 }
201
202 /**
203 Read information from the CPU save state.
204
205 @param This EFI_SMM_CPU_PROTOCOL instance
206 @param Width The number of bytes to read from the CPU save state.
207 @param Register Specifies the CPU register to read from the save state.
208 @param CpuIndex Specifies the zero-based index of the CPU save state.
209 @param Buffer Upon return, this holds the CPU register value read from the save state.
210
211 @retval EFI_SUCCESS The register was read from Save State
212 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
213 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
214
215 **/
216 EFI_STATUS
217 EFIAPI
218 SmmReadSaveState (
219 IN CONST EFI_SMM_CPU_PROTOCOL *This,
220 IN UINTN Width,
221 IN EFI_SMM_SAVE_STATE_REGISTER Register,
222 IN UINTN CpuIndex,
223 OUT VOID *Buffer
224 )
225 {
226 EFI_STATUS Status;
227
228 //
229 // Retrieve pointer to the specified CPU's SMM Save State buffer
230 //
231 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
232 return EFI_INVALID_PARAMETER;
233 }
234
235 //
236 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
237 //
238 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
239 //
240 // The pseudo-register only supports the 64-bit size specified by Width.
241 //
242 if (Width != sizeof (UINT64)) {
243 return EFI_INVALID_PARAMETER;
244 }
245 //
246 // If the processor is in SMM at the time the SMI occurred,
247 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
248 // Otherwise, EFI_NOT_FOUND is returned.
249 //
250 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
251 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
252 return EFI_SUCCESS;
253 } else {
254 return EFI_NOT_FOUND;
255 }
256 }
257
258 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
259 return EFI_INVALID_PARAMETER;
260 }
261
262 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
263 if (Status == EFI_UNSUPPORTED) {
264 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
265 }
266 return Status;
267 }
268
269 /**
270 Write data to the CPU save state.
271
272 @param This EFI_SMM_CPU_PROTOCOL instance
273 @param Width The number of bytes to write to the CPU save state.
274 @param Register Specifies the CPU register to write to the save state.
275 @param CpuIndex Specifies the zero-based index of the CPU save state
276 @param Buffer Upon entry, this holds the new CPU register value.
277
278 @retval EFI_SUCCESS The register was written to the save state.
279 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
280 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct.
281
282 **/
283 EFI_STATUS
284 EFIAPI
285 SmmWriteSaveState (
286 IN CONST EFI_SMM_CPU_PROTOCOL *This,
287 IN UINTN Width,
288 IN EFI_SMM_SAVE_STATE_REGISTER Register,
289 IN UINTN CpuIndex,
290 IN CONST VOID *Buffer
291 )
292 {
293 EFI_STATUS Status;
294
295 //
296 // Retrieve pointer to the specified CPU's SMM Save State buffer
297 //
298 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
299 return EFI_INVALID_PARAMETER;
300 }
301
302 //
303 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
304 //
305 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
306 return EFI_SUCCESS;
307 }
308
309 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
310 return EFI_INVALID_PARAMETER;
311 }
312
313 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
314 if (Status == EFI_UNSUPPORTED) {
315 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
316 }
317 return Status;
318 }
319
320
321 /**
322 C function for the SMI handler, used to change every processor's SMBASE register.
323
324 **/
325 VOID
326 EFIAPI
327 SmmInitHandler (
328 VOID
329 )
330 {
331 UINT32 ApicId;
332 UINTN Index;
333
334 //
335 // Update SMM IDT entries' code segment and load IDT
336 //
337 AsmWriteIdtr (&gcSmiIdtr);
338 ApicId = GetApicId ();
339
340 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
341
342 for (Index = 0; Index < mNumberOfCpus; Index++) {
343 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
344 //
345 // Initialize SMM specific features on the currently executing CPU
346 //
347 SmmCpuFeaturesInitializeProcessor (
348 Index,
349 mIsBsp,
350 gSmmCpuPrivate->ProcessorInfo,
351 &mCpuHotPlugData
352 );
353
354 if (!mSmmS3Flag) {
355 //
356 // Check XD and BTS features on each processor on normal boot
357 //
358 CheckFeatureSupported ();
359 }
360
361 if (mIsBsp) {
362 //
363 // BSP rebase is already done above.
364 // Initialize private data during S3 resume
365 //
366 InitializeMpSyncData ();
367 }
368
369 //
370 // Hook return after RSM to set SMM re-based flag
371 //
372 SemaphoreHook (Index, &mRebased[Index]);
373
374 return;
375 }
376 }
377 ASSERT (FALSE);
378 }
379
380 /**
381 Relocate SmmBases for each processor.
382
383 Executed on the first boot and on all S3 resumes.
384
385 **/
386 VOID
387 EFIAPI
388 SmmRelocateBases (
389 VOID
390 )
391 {
392 UINT8 BakBuf[BACK_BUF_SIZE];
393 SMRAM_SAVE_STATE_MAP BakBuf2;
394 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
395 UINT8 *U8Ptr;
396 UINT32 ApicId;
397 UINTN Index;
398 UINTN BspIndex;
399
400 //
401 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
402 //
403 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
404
405 //
406 // Patch ASM code template with current CR0, CR3, and CR4 values
407 //
408 gSmmCr0 = (UINT32)AsmReadCr0 ();
409 gSmmCr3 = (UINT32)AsmReadCr3 ();
410 gSmmCr4 = (UINT32)AsmReadCr4 ();
411
412 //
413 // Patch GDTR for SMM base relocation
414 //
415 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
416 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
417
418 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
419 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
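//
// SMM_DEFAULT_SMBASE is the architectural power-on default SMBASE (0x30000), so
// U8Ptr points at the default SMI entry point (SMBASE + 0x8000 = 0x38000) and
// CpuStatePtr at the default SMRAM save state map just below SMBASE + 64KB.
// Both areas are overwritten during relocation, so they are backed up first.
//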
420
421 //
422 // Backup original contents at address 0x38000
423 //
424 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
425 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
426
427 //
428 // Load image for relocation
429 //
430 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
431
432 //
433 // Retrieve the local APIC ID of current processor
434 //
435 ApicId = GetApicId ();
436
437 //
438 // Relocate SM bases for all APs
439 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
440 //
441 mIsBsp = FALSE;
442 BspIndex = (UINTN)-1;
443 for (Index = 0; Index < mNumberOfCpus; Index++) {
444 mRebased[Index] = FALSE;
445 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
446 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
447 //
448 // Wait for this AP to finish its 1st SMI
449 //
450 while (!mRebased[Index]);
451 } else {
452 //
453 // BSP will be Relocated later
454 //
455 BspIndex = Index;
456 }
457 }
458
459 //
460 // Relocate BSP's SMM base
461 //
462 ASSERT (BspIndex != (UINTN)-1);
463 mIsBsp = TRUE;
464 SendSmiIpi (ApicId);
465 //
466 // Wait for the BSP to finish its 1st SMI
467 //
468 while (!mRebased[BspIndex]);
469
470 //
471 // Restore contents at address 0x38000
472 //
473 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
474 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
475 }
476
477 /**
478 SMM Ready To Lock event notification handler.
479
480 The CPU S3 data is copied to SMRAM for security, and mSmmReadyToLock is set so
481 that additional lock actions can be performed from SMM on the next SMI.
482
483 @param[in] Protocol Points to the protocol's unique identifier.
484 @param[in] Interface Points to the interface instance.
485 @param[in] Handle The handle on which the interface was installed.
486
487 @retval EFI_SUCCESS Notification handler runs successfully.
488 **/
489 EFI_STATUS
490 EFIAPI
491 SmmReadyToLockEventNotify (
492 IN CONST EFI_GUID *Protocol,
493 IN VOID *Interface,
494 IN EFI_HANDLE Handle
495 )
496 {
497 GetAcpiCpuData ();
498
499 //
500 // Cache a copy of the UEFI memory map before the profiling feature starts.
501 //
502 GetUefiMemoryMap ();
503
504 //
505 // Set SMM ready to lock flag and return
506 //
507 mSmmReadyToLock = TRUE;
508 return EFI_SUCCESS;
509 }
510
511 /**
512 The module Entry Point of the CPU SMM driver.
513
514 @param ImageHandle The firmware allocated handle for the EFI image.
515 @param SystemTable A pointer to the EFI System Table.
516
517 @retval EFI_SUCCESS The entry point is executed successfully.
518 @retval Other Some error occurs when executing this entry point.
519
520 **/
521 EFI_STATUS
522 EFIAPI
523 PiCpuSmmEntry (
524 IN EFI_HANDLE ImageHandle,
525 IN EFI_SYSTEM_TABLE *SystemTable
526 )
527 {
528 EFI_STATUS Status;
529 EFI_MP_SERVICES_PROTOCOL *MpServices;
530 UINTN NumberOfEnabledProcessors;
531 UINTN Index;
532 VOID *Buffer;
533 UINTN BufferPages;
534 UINTN TileCodeSize;
535 UINTN TileDataSize;
536 UINTN TileSize;
537 UINT8 *Stacks;
538 VOID *Registration;
539 UINT32 RegEax;
540 UINT32 RegEdx;
541 UINTN FamilyId;
542 UINTN ModelId;
543 UINT32 Cr3;
544
545 //
546 // Initialize Debug Agent to support source level debug in SMM code
547 //
548 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
549
550 //
551 // Report the start of CPU SMM initialization.
552 //
553 REPORT_STATUS_CODE (
554 EFI_PROGRESS_CODE,
555 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
556 );
557
558 //
559 // Fix segment address of the long-mode-switch jump
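// (sizeof (UINTN) == sizeof (UINT64) is TRUE only in an X64 build, where the SMI
// entry code switches to long mode and needs a 64-bit code segment selector)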
560 //
561 if (sizeof (UINTN) == sizeof (UINT64)) {
562 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
563 }
564
565 //
566 // Find out SMRR Base and SMRR Size
567 //
568 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
569
570 //
571 // Get MP Services Protocol
572 //
573 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
574 ASSERT_EFI_ERROR (Status);
575
576 //
577 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
578 //
579 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
580 ASSERT_EFI_ERROR (Status);
581 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
582
583 //
584 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
585 // A constant BSP index makes no sense because the BSP may be hot removed.
586 //
587 DEBUG_CODE (
588 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
589
590 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
591 }
592 );
593
594 //
595 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
596 //
597 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
598 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
599
600 //
601 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
602 // Make sure AddressEncMask is confined to the smallest supported address field.
603 //
604 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
605 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
606
607 //
608 // If CPU hot plug is supported, allocate resources for processors that may be hot added.
609 //
610 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
611 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
612 } else {
613 mMaxNumberOfCpus = mNumberOfCpus;
614 }
615 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
616
617 //
618 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
619 // allocated buffer. The minimum size of this buffer for a uniprocessor system
620 // is 32 KB, because the entry point is at SMBASE + 32KB, and the CPU save state
621 // area is just below SMBASE + 64KB. If more than one CPU is present in the platform,
622 // then the SMI entry point and the CPU save state areas can be tiled to minimize
623 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
624 // by adding the CPU save state size, any extra CPU specific context, and
625 // the size of code that must be placed at the SMI entry point to transfer
626 // control to a C function in the native SMM execution mode. This size is
627 // rounded up to the nearest power of 2 to give the tile size for each CPU.
628 // The total amount of memory required is the maximum number of CPUs that the
629 // platform supports times the tile size. The picture below shows the tiling,
630 // where m is the number of tiles that fit in 32KB.
631 //
632 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
633 // | CPU m+1 Save State |
634 // +-----------------------------+
635 // | CPU m+1 Extra Data |
636 // +-----------------------------+
637 // | Padding |
638 // +-----------------------------+
639 // | CPU 2m SMI Entry |
640 // +#############################+ <-- Base of allocated buffer + 64 KB
641 // | CPU m-1 Save State |
642 // +-----------------------------+
643 // | CPU m-1 Extra Data |
644 // +-----------------------------+
645 // | Padding |
646 // +-----------------------------+
647 // | CPU 2m-1 SMI Entry |
648 // +=============================+ <-- 2^n offset from Base of allocated buffer
649 // | . . . . . . . . . . . . |
650 // +=============================+ <-- 2^n offset from Base of allocated buffer
651 // | CPU 2 Save State |
652 // +-----------------------------+
653 // | CPU 2 Extra Data |
654 // +-----------------------------+
655 // | Padding |
656 // +-----------------------------+
657 // | CPU m+1 SMI Entry |
658 // +=============================+ <-- Base of allocated buffer + 32 KB
659 // | CPU 1 Save State |
660 // +-----------------------------+
661 // | CPU 1 Extra Data |
662 // +-----------------------------+
663 // | Padding |
664 // +-----------------------------+
665 // | CPU m SMI Entry |
666 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
667 // | CPU 0 Save State |
668 // +-----------------------------+
669 // | CPU 0 Extra Data |
670 // +-----------------------------+
671 // | Padding |
672 // +-----------------------------+
673 // | CPU m-1 SMI Entry |
674 // +=============================+ <-- 2^n offset from Base of allocated buffer
675 // | . . . . . . . . . . . . |
676 // +=============================+ <-- 2^n offset from Base of allocated buffer
677 // | Padding |
678 // +-----------------------------+
679 // | CPU 1 SMI Entry |
680 // +=============================+ <-- 2^n offset from Base of allocated buffer
681 // | Padding |
682 // +-----------------------------+
683 // | CPU 0 SMI Entry |
684 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
685 //
686
687 //
688 // Retrieve CPU Family
689 //
690 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
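//
// CPUID.01h:EAX signature layout: bits [11:8] hold the Family and bits [7:4] the
// Model; for Family 06h or 0Fh, the Extended Model in bits [19:16] is prepended
// as the high nibble of the Model.
//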
691 FamilyId = (RegEax >> 8) & 0xf;
692 ModelId = (RegEax >> 4) & 0xf;
693 if (FamilyId == 0x06 || FamilyId == 0x0f) {
694 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
695 }
696
697 RegEdx = 0;
698 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
699 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
700 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
701 }
702 //
703 // Determine the mode of the CPU at the time an SMI occurs
704 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
705 // Volume 3C, Section 34.4.1.1
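// CPUID.80000001h:EDX[29] (BIT29 below) is the Intel 64 / long-mode support bit;
// processors that set it use the 64-bit SMRAM save state map layout.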
706 //
707 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
708 if ((RegEdx & BIT29) != 0) {
709 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
710 }
711 if (FamilyId == 0x06) {
712 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
713 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
714 }
715 }
716
717 //
718 // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map,
719 // the extra CPU specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI
720 // entry point. This size is rounded up to the nearest power of 2.
721 //
722 TileCodeSize = GetSmiHandlerSize ();
723 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
724 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
725 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
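//
// GetPowerOfTwo32() returns the value of the highest set bit, so the two lines
// below round (TileDataSize + TileCodeSize) up to the next power of two. For
// example, TileCodeSize = 0x1000 and TileDataSize = 0x3000 yield TileSize =
// 0x4000, while a combined size of 0x5000 would yield TileSize = 0x8000.
//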
726 TileSize = TileDataSize + TileCodeSize - 1;
727 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
728 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
729
730 //
731 // If the TileSize is larger than space available for the SMI Handler of
732 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
733 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
734 // the SMI Handler size must be reduced or the size of the extra CPU specific
735 // context must be reduced.
736 //
737 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
738
739 //
740 // Allocate buffer for all of the tiles.
741 //
742 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
743 // Volume 3C, Section 34.11 SMBASE Relocation
744 // For Pentium and Intel486 processors, the SMBASE values must be
745 // aligned on a 32-KByte boundary or the processor will enter shutdown
746 // state during the execution of a RSM instruction.
747 //
748 // Intel486 processors: FamilyId is 4
749 // Pentium processors : FamilyId is 5
750 //
751 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
752 if ((FamilyId == 4) || (FamilyId == 5)) {
753 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
754 } else {
755 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
756 }
757 ASSERT (Buffer != NULL);
758 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
759
760 //
761 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
762 //
763 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
764 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
765
766 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
767 ASSERT (gSmmCpuPrivate->Operation != NULL);
768
769 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
770 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
771
772 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
773 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
774
775 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
776 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
777
778 //
779 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
780 //
781 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
782 ASSERT (mCpuHotPlugData.ApicId != NULL);
783 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
784 ASSERT (mCpuHotPlugData.SmBase != NULL);
785 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
786
787 //
788 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
789 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
790 // size for each CPU in the platform
791 //
792 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
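//
// SMBASE is chosen so that SMBASE + SMM_HANDLER_OFFSET (the SMI entry point)
// lands at the start of this CPU's tile in the allocated buffer; the save state
// map then falls at SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET inside the same tile.
//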
793 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
794 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
795 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
796 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
797
798 if (Index < mNumberOfCpus) {
799 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
800 ASSERT_EFI_ERROR (Status);
801 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
802
803 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
804 Index,
805 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
806 mCpuHotPlugData.SmBase[Index],
807 gSmmCpuPrivate->CpuSaveState[Index],
808 gSmmCpuPrivate->CpuSaveStateSize[Index]
809 ));
810 } else {
811 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
812 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
813 }
814 }
815
816 //
817 // Allocate SMI stacks for all processors.
818 //
819 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
820 //
821 // 2 more pages are allocated for each processor:
822 // one is the guard page and the other is the known good stack.
823 //
824 // +-------------------------------------------+-----+-------------------------------------------+
825 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
826 // +-------------------------------------------+-----+-------------------------------------------+
827 // | | | |
828 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
829 //
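// mSmmStackSize therefore covers one processor's slot: the SMM stack rounded up
// to whole pages plus the two extra pages (known good stack and guard page).
//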
830 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
831 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
832 ASSERT (Stacks != NULL);
833 mSmmStackArrayBase = (UINTN)Stacks;
834 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
835 } else {
836 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
837 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
838 ASSERT (Stacks != NULL);
839 }
840
841 //
842 // Set SMI stack for SMM base relocation
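// (stacks grow down, so the initial stack pointer is set to the top of the first
// processor's stack area, leaving one UINTN of head room)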
843 //
844 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
845
846 //
847 // Initialize IDT
848 //
849 InitializeSmmIdt ();
850
851 //
852 // Relocate SMM Base addresses to the ones allocated from SMRAM
853 //
854 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
855 ASSERT (mRebased != NULL);
856 SmmRelocateBases ();
857
858 //
859 // Call hook for BSP to perform extra actions in normal mode after all
860 // SMM base addresses have been relocated on all CPUs
861 //
862 SmmCpuFeaturesSmmRelocationComplete ();
863
864 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
865
866 //
867 // SMM Time initialization
868 //
869 InitializeSmmTimer ();
870
871 //
872 // Initialize MP globals
873 //
874 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
875
876 //
877 // Fill in SMM Reserved Regions
878 //
879 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
880 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
881
882 //
883 // Install the SMM Configuration Protocol onto a new handle on the handle database.
884 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
885 // to an SMRAM address will be present in the handle database
886 //
887 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
888 &gSmmCpuPrivate->SmmCpuHandle,
889 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
890 NULL
891 );
892 ASSERT_EFI_ERROR (Status);
893
894 //
895 // Install the SMM CPU Protocol into SMM protocol database
896 //
897 Status = gSmst->SmmInstallProtocolInterface (
898 &mSmmCpuHandle,
899 &gEfiSmmCpuProtocolGuid,
900 EFI_NATIVE_INTERFACE,
901 &mSmmCpu
902 );
903 ASSERT_EFI_ERROR (Status);
904
905 //
906 // Install the SMM Memory Attribute Protocol into SMM protocol database
907 //
908 Status = gSmst->SmmInstallProtocolInterface (
909 &mSmmCpuHandle,
910 &gEdkiiSmmMemoryAttributeProtocolGuid,
911 EFI_NATIVE_INTERFACE,
912 &mSmmMemoryAttribute
913 );
914 ASSERT_EFI_ERROR (Status);
915
916 //
917 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
918 //
919 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
920 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
921 ASSERT_EFI_ERROR (Status);
922 }
923
924 //
925 // Initialize SMM CPU Services Support
926 //
927 Status = InitializeSmmCpuServices (mSmmCpuHandle);
928 ASSERT_EFI_ERROR (Status);
929
930 //
931 // Register SMM Ready To Lock Protocol notification
932 //
933 Status = gSmst->SmmRegisterProtocolNotify (
934 &gEfiSmmReadyToLockProtocolGuid,
935 SmmReadyToLockEventNotify,
936 &Registration
937 );
938 ASSERT_EFI_ERROR (Status);
939
940 //
941 // Initialize SMM Profile feature
942 //
943 InitSmmProfile (Cr3);
944
945 GetAcpiS3EnableFlag ();
946 InitSmmS3ResumeState (Cr3);
947
948 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
949
950 return EFI_SUCCESS;
951 }
952
953 /**
954
955 Find out SMRAM information including SMRR base and SMRR size.
956
957 @param SmrrBase SMRR base
958 @param SmrrSize SMRR size
959
960 **/
961 VOID
962 FindSmramInfo (
963 OUT UINT32 *SmrrBase,
964 OUT UINT32 *SmrrSize
965 )
966 {
967 EFI_STATUS Status;
968 UINTN Size;
969 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
970 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
971 UINTN Index;
972 UINT64 MaxSize;
973 BOOLEAN Found;
974
975 //
976 // Get SMM Access Protocol
977 //
978 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
979 ASSERT_EFI_ERROR (Status);
980
981 //
982 // Get SMRAM information
983 //
984 Size = 0;
985 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
986 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
987
988 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
989 ASSERT (mSmmCpuSmramRanges != NULL);
990
991 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
992 ASSERT_EFI_ERROR (Status);
993
994 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
995
996 //
997 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
998 //
999 CurrentSmramRange = NULL;
1000 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1001 //
1002 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1003 //
1004 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1005 continue;
1006 }
1007
1008 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1009 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1010 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1011 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1012 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1013 }
1014 }
1015 }
1016 }
1017
1018 ASSERT (CurrentSmramRange != NULL);
1019
1020 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1021 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1022
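//
// Extend the SMRR range to cover any SMRAM descriptors that are physically
// contiguous with it, either immediately below or immediately above, and repeat
// until no further adjacent ranges are found.
//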
1023 do {
1024 Found = FALSE;
1025 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1026 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
1027 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
1028 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1029 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1030 Found = TRUE;
1031 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
1032 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1033 Found = TRUE;
1034 }
1035 }
1036 } while (Found);
1037
1038 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1039 }
1040
1041 /**
1042 Configure SMM Code Access Check feature on an AP.
1043 SMM Feature Control MSR will be locked after configuration.
1044
1045 @param[in,out] Buffer Pointer to private data buffer.
1046 **/
1047 VOID
1048 EFIAPI
1049 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1050 IN OUT VOID *Buffer
1051 )
1052 {
1053 UINTN CpuIndex;
1054 UINT64 SmmFeatureControlMsr;
1055 UINT64 NewSmmFeatureControlMsr;
1056
1057 //
1058 // Retrieve the CPU Index from the context passed in
1059 //
1060 CpuIndex = *(UINTN *)Buffer;
1061
1062 //
1063 // Get the current SMM Feature Control MSR value
1064 //
1065 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1066
1067 //
1068 // Compute the new SMM Feature Control MSR value
1069 //
1070 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1071 if (mSmmCodeAccessCheckEnable) {
1072 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1073 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1074 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1075 }
1076 }
1077
1078 //
1079 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1080 //
1081 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1082 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1083 }
1084
1085 //
1086 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1087 //
1088 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1089 }
1090
1091 /**
1092 Configure SMM Code Access Check feature for all processors.
1093 SMM Feature Control MSR will be locked after configuration.
1094 **/
1095 VOID
1096 ConfigSmmCodeAccessCheck (
1097 VOID
1098 )
1099 {
1100 UINTN Index;
1101 EFI_STATUS Status;
1102
1103 //
1104 // Check to see if the Feature Control MSR is supported on this CPU
1105 //
1106 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1107 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1108 mSmmCodeAccessCheckEnable = FALSE;
1109 return;
1110 }
1111
1112 //
1113 // Check to see if the CPU supports the SMM Code Access Check feature
1114 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1115 //
1116 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1117 mSmmCodeAccessCheckEnable = FALSE;
1118 return;
1119 }
1120
1121 //
1122 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1123 //
1124 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1125
1126 //
1127 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1128 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1129 //
1130 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1131
1132 //
1133 // Enable SMM Code Access Check feature on the BSP.
1134 //
1135 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1136
1137 //
1138 // Enable SMM Code Access Check feature for the APs.
1139 //
1140 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1141 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1142 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1143 //
1144 // If this processor does not exist
1145 //
1146 continue;
1147 }
1148 //
1149 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1150 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1151 //
1152 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1153
1154 //
1155 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1156 //
1157 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1158 ASSERT_EFI_ERROR (Status);
1159
1160 //
1161 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1162 //
1163 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1164 CpuPause ();
1165 }
1166
1167 //
1168 // Release the Config SMM Code Access Check spin lock.
1169 //
1170 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1171 }
1172 }
1173 }
1174
1175 /**
1176 This API provides a way to allocate memory for page table.
1177
1178 This API can be called more than once to allocate memory for page tables.
1179
1180 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1181 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1182 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1183 returned.
1184
1185 @param Pages The number of 4 KB pages to allocate.
1186
1187 @return A pointer to the allocated buffer or NULL if allocation fails.
1188
1189 **/
1190 VOID *
1191 AllocatePageTableMemory (
1192 IN UINTN Pages
1193 )
1194 {
1195 VOID *Buffer;
1196
1197 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1198 if (Buffer != NULL) {
1199 return Buffer;
1200 }
1201 return AllocatePages (Pages);
1202 }
1203
1204 /**
1205 Allocate pages for code.
1206
1207 @param[in] Pages Number of pages to be allocated.
1208
1209 @return Allocated memory.
1210 **/
1211 VOID *
1212 AllocateCodePages (
1213 IN UINTN Pages
1214 )
1215 {
1216 EFI_STATUS Status;
1217 EFI_PHYSICAL_ADDRESS Memory;
1218
1219 if (Pages == 0) {
1220 return NULL;
1221 }
1222
1223 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1224 if (EFI_ERROR (Status)) {
1225 return NULL;
1226 }
1227 return (VOID *) (UINTN) Memory;
1228 }
1229
1230 /**
1231 Allocate aligned pages for code.
1232
1233 @param[in] Pages Number of pages to be allocated.
1234 @param[in] Alignment The requested alignment of the allocation.
1235 Must be a power of two.
1236 If Alignment is zero, then byte alignment is used.
1237
1238 @return Allocated memory.
1239 **/
1240 VOID *
1241 AllocateAlignedCodePages (
1242 IN UINTN Pages,
1243 IN UINTN Alignment
1244 )
1245 {
1246 EFI_STATUS Status;
1247 EFI_PHYSICAL_ADDRESS Memory;
1248 UINTN AlignedMemory;
1249 UINTN AlignmentMask;
1250 UINTN UnalignedPages;
1251 UINTN RealPages;
1252
1253 //
1254 // Alignment must be a power of two or zero.
1255 //
1256 ASSERT ((Alignment & (Alignment - 1)) == 0);
1257
1258 if (Pages == 0) {
1259 return NULL;
1260 }
1261 if (Alignment > EFI_PAGE_SIZE) {
1262 //
1263 // Calculate the total number of pages since alignment is larger than page size.
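// EFI_SIZE_TO_PAGES (Alignment) extra pages are over-allocated (for example, 8
// extra pages for a 32 KB alignment); the unaligned head and tail are freed below.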
1264 //
1265 AlignmentMask = Alignment - 1;
1266 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1267 //
1268 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1269 //
1270 ASSERT (RealPages > Pages);
1271
1272 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1273 if (EFI_ERROR (Status)) {
1274 return NULL;
1275 }
1276 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1277 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1278 if (UnalignedPages > 0) {
1279 //
1280 // Free first unaligned page(s).
1281 //
1282 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1283 ASSERT_EFI_ERROR (Status);
1284 }
1285 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
1286 UnalignedPages = RealPages - Pages - UnalignedPages;
1287 if (UnalignedPages > 0) {
1288 //
1289 // Free last unaligned page(s).
1290 //
1291 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1292 ASSERT_EFI_ERROR (Status);
1293 }
1294 } else {
1295 //
1296 // Do not over-allocate pages in this case.
1297 //
1298 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1299 if (EFI_ERROR (Status)) {
1300 return NULL;
1301 }
1302 AlignedMemory = (UINTN) Memory;
1303 }
1304 return (VOID *) AlignedMemory;
1305 }
1306
1307 /**
1308 Perform the remaining tasks.
1309
1310 **/
1311 VOID
1312 PerformRemainingTasks (
1313 VOID
1314 )
1315 {
1316 if (mSmmReadyToLock) {
1317 //
1318 // Start SMM Profile feature
1319 //
1320 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1321 SmmProfileStart ();
1322 }
1323 //
1324 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not present and execute-disable.
1325 //
1326 InitPaging ();
1327
1328 //
1329 // Mark critical region to be read-only in page table
1330 //
1331 SetMemMapAttributes ();
1332
1333 //
1334 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1335 //
1336 SetUefiMemMapAttributes ();
1337
1338 //
1339 // Set page table itself to be read-only
1340 //
1341 SetPageTableAttributes ();
1342
1343 //
1344 // Configure SMM Code Access Check feature if available.
1345 //
1346 ConfigSmmCodeAccessCheck ();
1347
1348 SmmCpuFeaturesCompleteSmmReadyToLock ();
1349
1350 //
1351 // Clean SMM ready to lock flag
1352 //
1353 mSmmReadyToLock = FALSE;
1354 }
1355 }
1356
1357 /**
1358 Perform the pre tasks.
1359
1360 **/
1361 VOID
1362 PerformPreTasks (
1363 VOID
1364 )
1365 {
1366 RestoreSmmConfigurationInS3 ();
1367 }