1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // SMM CPU Private Data structure that contains SMM Configuration Protocol
19 // along with its supporting fields.
20 //
21 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
22 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
23 NULL, // SmmCpuHandle
24 NULL, // Pointer to ProcessorInfo array
25 NULL, // Pointer to Operation array
26 NULL, // Pointer to CpuSaveStateSize array
27 NULL, // Pointer to CpuSaveState array
28 { {0} }, // SmmReservedSmramRegion
29 {
30 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
31 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
32 0, // SmmCoreEntryContext.NumberOfCpus
33 NULL, // SmmCoreEntryContext.CpuSaveStateSize
34 NULL // SmmCoreEntryContext.CpuSaveState
35 },
36 NULL, // SmmCoreEntry
37 {
38 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
39 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
40 },
41 };
42
43 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
45 0, // Array Length of SmBase and APIC ID
46 NULL, // Pointer to APIC ID array
47 NULL, // Pointer to SMBASE array
48 0, // Reserved
49 0, // SmrrBase
50 0 // SmrrSize
51 };
52
53 //
54 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
55 //
56 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
57
58 //
59 // SMM Relocation variables
60 //
61 volatile BOOLEAN *mRebased;
62 volatile BOOLEAN mIsBsp;
63
64 ///
65 /// Handle for the SMM CPU Protocol
66 ///
67 EFI_HANDLE mSmmCpuHandle = NULL;
68
69 ///
70 /// SMM CPU Protocol instance
71 ///
72 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
73 SmmReadSaveState,
74 SmmWriteSaveState
75 };
76
77 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
78
79 ///
80 /// SMM CPU Save State Protocol instance
81 ///
82 EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = {
83 NULL
84 };
85
86 //
87 // SMM stack information
88 //
89 UINTN mSmmStackArrayBase;
90 UINTN mSmmStackArrayEnd;
91 UINTN mSmmStackSize;
92
93 //
94 // Pointer to structure used during S3 Resume
95 //
96 SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
97
98 UINTN mMaxNumberOfCpus = 1;
99 UINTN mNumberOfCpus = 1;
100
101 //
102 // SMM ready to lock flag
103 //
104 BOOLEAN mSmmReadyToLock = FALSE;
105
106 //
107 // Global used to cache PCD for SMM Code Access Check enable
108 //
109 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
110
111 //
112 // Spin lock used to serialize setting of SMM Code Access Check feature
113 //
114 SPIN_LOCK mConfigSmmCodeAccessCheckLock;
115
116 /**
117 Initialize the IDT to set up exception handlers for SMM.
118
119 **/
120 VOID
121 InitializeSmmIdt (
122 VOID
123 )
124 {
125 EFI_STATUS Status;
126 BOOLEAN InterruptState;
127 IA32_DESCRIPTOR DxeIdtr;
128 //
129 // Disable Interrupt and save DXE IDT table
130 //
131 InterruptState = SaveAndDisableInterrupts ();
132 AsmReadIdtr (&DxeIdtr);
133 //
134 // Load SMM temporary IDT table
135 //
136 AsmWriteIdtr (&gcSmiIdtr);
137 //
138 // Set up the default SMM exception handlers; the SMM IDT table
139 // will be updated and saved in gcSmiIdtr
140 //
141 Status = InitializeCpuExceptionHandlers (NULL);
142 ASSERT_EFI_ERROR (Status);
143 //
144 // Restore DXE IDT table and CPU interrupt
145 //
146 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
147 SetInterruptState (InterruptState);
148 }
149
150 /**
151 Find the module that contains the input IP address and print its name.
152
153 @param CallerIpAddress Caller instruction pointer.
154
155 **/
156 VOID
157 DumpModuleInfoByIp (
158 IN UINTN CallerIpAddress
159 )
160 {
161 UINTN Pe32Data;
162 EFI_IMAGE_DOS_HEADER *DosHdr;
163 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;
164 VOID *PdbPointer;
165 UINT64 DumpIpAddress;
166
167 //
168 // Find Image Base
169 //
170 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);
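// Illustration (hypothetical address): CallerIpAddress 0x7FE12345 & ~(SIZE_4KB - 1)
// yields 0x7FE12000; the loop below then walks downward one 4 KB page at a time
// looking for the 'MZ' DOS signature of the containing image.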
171 while (Pe32Data != 0) {
172 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;
173 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {
174 //
175 // DOS image header is present, so read the PE header after the DOS image header.
176 //
177 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));
178 //
179 // Make sure PE header address does not overflow and is less than the initial address.
180 //
181 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {
182 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {
183 //
184 // It's PE image.
185 //
186 break;
187 }
188 }
189 }
190
191 //
192 // Image base not found yet, check the previous 4 KB-aligned address
193 //
194 Pe32Data -= SIZE_4KB;
195 }
196
197 DumpIpAddress = CallerIpAddress;
198 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));
199
200 if (Pe32Data != 0) {
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
202 if (PdbPointer != NULL) {
203 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));
204 }
205 }
206 }
207
208 /**
209 Read information from the CPU save state.
210
211 @param This EFI_SMM_CPU_PROTOCOL instance
212 @param Width The number of bytes to read from the CPU save state.
213 @param Register Specifies the CPU register to read from the save state.
214 @param CpuIndex Specifies the zero-based index of the CPU save state.
215 @param Buffer Upon return, this holds the CPU register value read from the save state.
216
217 @retval EFI_SUCCESS The register was read from the save state
218 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor
219 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
220
221 **/
222 EFI_STATUS
223 EFIAPI
224 SmmReadSaveState (
225 IN CONST EFI_SMM_CPU_PROTOCOL *This,
226 IN UINTN Width,
227 IN EFI_SMM_SAVE_STATE_REGISTER Register,
228 IN UINTN CpuIndex,
229 OUT VOID *Buffer
230 )
231 {
232 EFI_STATUS Status;
233
234 //
235 // Retrieve pointer to the specified CPU's SMM Save State buffer
236 //
237 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
238 return EFI_INVALID_PARAMETER;
239 }
240
241 //
242 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
243 //
244 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
245 //
246 // The pseudo-register only supports the 64-bit size specified by Width.
247 //
248 if (Width != sizeof (UINT64)) {
249 return EFI_INVALID_PARAMETER;
250 }
251 //
252 // If the processor is in SMM at the time the SMI occurred,
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
254 // Otherwise, EFI_NOT_FOUND is returned.
255 //
256 if (mSmmMpSyncData->CpuData[CpuIndex].Present) {
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
258 return EFI_SUCCESS;
259 } else {
260 return EFI_NOT_FOUND;
261 }
262 }
263
264 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
265 return EFI_INVALID_PARAMETER;
266 }
267
268 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
269 if (Status == EFI_UNSUPPORTED) {
270 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
271 }
272 return Status;
273 }
274
275 /**
276 Write data to the CPU save state.
277
278 @param This EFI_SMM_CPU_PROTOCOL instance
279 @param Width The number of bytes to write to the CPU save state.
280 @param Register Specifies the CPU register to write to the save state.
281 @param CpuIndex Specifies the zero-based index of the CPU save state
282 @param Buffer Upon entry, this holds the new CPU register value.
283
284 @retval EFI_SUCCESS The register was written to the save state
285 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor
286 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
287
288 **/
289 EFI_STATUS
290 EFIAPI
291 SmmWriteSaveState (
292 IN CONST EFI_SMM_CPU_PROTOCOL *This,
293 IN UINTN Width,
294 IN EFI_SMM_SAVE_STATE_REGISTER Register,
295 IN UINTN CpuIndex,
296 IN CONST VOID *Buffer
297 )
298 {
299 EFI_STATUS Status;
300
301 //
302 // Retrieve pointer to the specified CPU's SMM Save State buffer
303 //
304 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
305 return EFI_INVALID_PARAMETER;
306 }
307
308 //
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
310 //
311 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
312 return EFI_SUCCESS;
313 }
314
315 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
316 return EFI_INVALID_PARAMETER;
317 }
318
319 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
320 if (Status == EFI_UNSUPPORTED) {
321 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
322 }
323 return Status;
324 }
325
326
327 /**
328 C function for the SMI handler, used to change each processor's SMBASE register.
329
330 **/
331 VOID
332 EFIAPI
333 SmmInitHandler (
334 VOID
335 )
336 {
337 UINT32 ApicId;
338 UINTN Index;
339
340 //
341 // Update SMM IDT entries' code segment and load IDT
342 //
343 AsmWriteIdtr (&gcSmiIdtr);
344 ApicId = GetApicId ();
345
346 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
347
348 for (Index = 0; Index < mNumberOfCpus; Index++) {
349 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
350 //
351 // Initialize SMM specific features on the currently executing CPU
352 //
353 SmmCpuFeaturesInitializeProcessor (
354 Index,
355 mIsBsp,
356 gSmmCpuPrivate->ProcessorInfo,
357 &mCpuHotPlugData
358 );
359
360 if (mIsBsp) {
361 //
362 // BSP rebase is already done above.
363 // Initialize private data during S3 resume
364 //
365 InitializeMpSyncData ();
366 }
367
368 //
369 // Hook return after RSM to set SMM re-based flag
370 //
371 SemaphoreHook (Index, &mRebased[Index]);
372
373 return;
374 }
375 }
376 ASSERT (FALSE);
377 }
378
379 /**
380 Relocate SmmBases for each processor.
381
382 Executed on the first boot and on all S3 resumes.
383
384 **/
385 VOID
386 EFIAPI
387 SmmRelocateBases (
388 VOID
389 )
390 {
391 UINT8 BakBuf[BACK_BUF_SIZE];
392 SMRAM_SAVE_STATE_MAP BakBuf2;
393 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
394 UINT8 *U8Ptr;
395 UINT32 ApicId;
396 UINTN Index;
397 UINTN BspIndex;
398
399 //
400 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
401 //
402 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
403
404 //
405 // Patch ASM code template with current CR0, CR3, and CR4 values
406 //
407 gSmmCr0 = (UINT32)AsmReadCr0 ();
408 gSmmCr3 = (UINT32)AsmReadCr3 ();
409 gSmmCr4 = (UINT32)AsmReadCr4 ();
410
411 //
412 // Patch GDTR for SMM base relocation
413 //
414 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
415 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
416
417 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
418 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
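//
// Illustrative note (values assumed from the usual SmramSaveStateMap.h definitions):
//   SMM_DEFAULT_SMBASE          = 0x30000
//   SMM_HANDLER_OFFSET          = 0x8000   -> U8Ptr       = 0x38000 (matches the comment below)
//   SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00   -> CpuStatePtr = 0x3FC00
//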
419
420 //
421 // Backup original contents at address 0x38000
422 //
423 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
424 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
425
426 //
427 // Load image for relocation
428 //
429 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
430
431 //
432 // Retrieve the local APIC ID of current processor
433 //
434 ApicId = GetApicId ();
435
436 //
437 // Relocate SM bases for all APs
438 // This is the APs' first SMI - the rebase is done here, and the APs' default SMI handler is overridden by gcSmmInitTemplate
439 //
440 mIsBsp = FALSE;
441 BspIndex = (UINTN)-1;
442 for (Index = 0; Index < mNumberOfCpus; Index++) {
443 mRebased[Index] = FALSE;
444 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
445 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
446 //
447 // Wait for this AP to finish its 1st SMI
448 //
449 while (!mRebased[Index]);
450 } else {
451 //
452 // The BSP will be relocated later
453 //
454 BspIndex = Index;
455 }
456 }
457
458 //
459 // Relocate BSP's SMM base
460 //
461 ASSERT (BspIndex != (UINTN)-1);
462 mIsBsp = TRUE;
463 SendSmiIpi (ApicId);
464 //
465 // Wait for the BSP to finish its 1st SMI
466 //
467 while (!mRebased[BspIndex]);
468
469 //
470 // Restore contents at address 0x38000
471 //
472 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
473 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
474 }
475
476 /**
477 Perform SMM initialization for all processors in the S3 boot path.
478
479 For a native platform, MP initialization in the S3 boot path is also performed in this function.
480 **/
481 VOID
482 EFIAPI
483 SmmRestoreCpu (
484 VOID
485 )
486 {
487 SMM_S3_RESUME_STATE *SmmS3ResumeState;
488 IA32_DESCRIPTOR Ia32Idtr;
489 IA32_DESCRIPTOR X64Idtr;
490 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
491 EFI_STATUS Status;
492
493 DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));
494
495 //
496 // See if there is enough context to resume PEI Phase
497 //
498 if (mSmmS3ResumeState == NULL) {
499 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
500 CpuDeadLoop ();
501 }
502
503 SmmS3ResumeState = mSmmS3ResumeState;
504 ASSERT (SmmS3ResumeState != NULL);
505
506 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
507 //
508 // Save the IA32 IDT Descriptor
509 //
510 AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
511
512 //
513 // Setup X64 IDT table
514 //
515 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
516 X64Idtr.Base = (UINTN) IdtEntryTable;
517 X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
518 AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);
519
520 //
521 // Setup the default exception handler
522 //
523 Status = InitializeCpuExceptionHandlers (NULL);
524 ASSERT_EFI_ERROR (Status);
525
526 //
527 // Initialize Debug Agent to support source level debug
528 //
529 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
530 }
531
532 //
533 // Perform the following CPU initialization for native platforms only
534 //
535 if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
536 //
537 // Skip initialization if mAcpiCpuData is not valid
538 //
539 if (mAcpiCpuData.NumberOfCpus > 0) {
540 //
541 // First time microcode load and restore MTRRs
542 //
543 EarlyInitializeCpu ();
544 }
545 }
546
547 //
548 // Restore SMBASE for BSP and all APs
549 //
550 SmmRelocateBases ();
551
552 //
553 // Perform the following CPU initialization for native platforms only
554 //
555 if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
556 //
557 // Skip initialization if mAcpiCpuData is not valid
558 //
559 if (mAcpiCpuData.NumberOfCpus > 0) {
560 //
561 // Restore MSRs for BSP and all APs
562 //
563 InitializeCpu ();
564 }
565 }
566
567 //
568 // Set a flag to restore SMM configuration in S3 path.
569 //
570 mRestoreSmmConfigurationInS3 = TRUE;
571
572 DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
573 DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
574 DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
575 DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
576 DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));
577
578 //
579 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
580 //
581 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
582 DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
583
584 SwitchStack (
585 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
586 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
587 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
588 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
589 );
590 }
591
592 //
593 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
594 //
595 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
596 DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
597 //
598 // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
599 //
600 SaveAndSetDebugTimerInterrupt (FALSE);
601 //
602 // Restore IA32 IDT table
603 //
604 AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
605 AsmDisablePaging64 (
606 SmmS3ResumeState->ReturnCs,
607 (UINT32)SmmS3ResumeState->ReturnEntryPoint,
608 (UINT32)SmmS3ResumeState->ReturnContext1,
609 (UINT32)SmmS3ResumeState->ReturnContext2,
610 (UINT32)SmmS3ResumeState->ReturnStackPointer
611 );
612 }
613
614 //
615 // Can not resume PEI Phase
616 //
617 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
618 CpuDeadLoop ();
619 }
620
621 /**
622 Copy register table from ACPI NVS memory into SMRAM.
623
624 @param[in] DestinationRegisterTableList Points to destination register table.
625 @param[in] SourceRegisterTableList Points to source register table.
626 @param[in] NumberOfCpus Number of CPUs.
627
628 **/
629 VOID
630 CopyRegisterTable (
631 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
632 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
633 IN UINT32 NumberOfCpus
634 )
635 {
636 UINTN Index;
637 UINTN Index1;
638 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
639
640 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
641 for (Index = 0; Index < NumberOfCpus; Index++) {
642 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
643 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
644 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
645 //
646 // Go through all MSRs in the register table to initialize the MSR spin locks
647 //
648 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
649 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
650 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
651 //
652 // Initialize an MSR spin lock only for those MSRs that need bit-field writes
653 //
654 InitMsrSpinLockByIndex (RegisterTableEntry->Index);
655 }
656 }
657 }
658 }
659
660 /**
661 SMM Ready To Lock event notification handler.
662
663 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
664 perform additional lock actions that must be performed from SMM on the next SMI.
665
666 @param[in] Protocol Points to the protocol's unique identifier.
667 @param[in] Interface Points to the interface instance.
668 @param[in] Handle The handle on which the interface was installed.
669
670 @retval EFI_SUCCESS Notification handler runs successfully.
671 **/
672 EFI_STATUS
673 EFIAPI
674 SmmReadyToLockEventNotify (
675 IN CONST EFI_GUID *Protocol,
676 IN VOID *Interface,
677 IN EFI_HANDLE Handle
678 )
679 {
680 ACPI_CPU_DATA *AcpiCpuData;
681 IA32_DESCRIPTOR *Gdtr;
682 IA32_DESCRIPTOR *Idtr;
683
684 //
685 // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
686 //
687 mAcpiCpuData.NumberOfCpus = 0;
688
689 //
690 // If FrameworkCompatibilitySupport is enabled, then do not copy the CPU S3 data into SMRAM
691 //
692 if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
693 goto Done;
694 }
695
696 //
697 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
698 //
699 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
700 if (AcpiCpuData == NULL) {
701 goto Done;
702 }
703
704 //
705 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
706 //
707 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
708
709 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
710 ASSERT (mAcpiCpuData.MtrrTable != 0);
711
712 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
713
714 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
715 ASSERT (mAcpiCpuData.GdtrProfile != 0);
716
717 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
718
719 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
720 ASSERT (mAcpiCpuData.IdtrProfile != 0);
721
722 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
723
724 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
725 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
726
727 CopyRegisterTable (
728 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
729 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
730 mAcpiCpuData.NumberOfCpus
731 );
732
733 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
734 ASSERT (mAcpiCpuData.RegisterTable != 0);
735
736 CopyRegisterTable (
737 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
738 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
739 mAcpiCpuData.NumberOfCpus
740 );
741
742 //
743 // Copy the APs' GDT, IDT, and Machine Check handler into SMRAM.
744 //
745 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
746 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
747
748 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
749 ASSERT (mGdtForAp != NULL);
750 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
751 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));
752
753 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
754 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
755 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
756
757 Done:
758 //
759 // Set SMM ready to lock flag and return
760 //
761 mSmmReadyToLock = TRUE;
762 return EFI_SUCCESS;
763 }
764
765 /**
766 The module Entry Point of the CPU SMM driver.
767
768 @param ImageHandle The firmware allocated handle for the EFI image.
769 @param SystemTable A pointer to the EFI System Table.
770
771 @retval EFI_SUCCESS The entry point is executed successfully.
772 @retval Other Some error occurs when executing this entry point.
773
774 **/
775 EFI_STATUS
776 EFIAPI
777 PiCpuSmmEntry (
778 IN EFI_HANDLE ImageHandle,
779 IN EFI_SYSTEM_TABLE *SystemTable
780 )
781 {
782 EFI_STATUS Status;
783 EFI_MP_SERVICES_PROTOCOL *MpServices;
784 UINTN NumberOfEnabledProcessors;
785 UINTN Index;
786 VOID *Buffer;
787 UINTN TileSize;
788 VOID *GuidHob;
789 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
790 SMM_S3_RESUME_STATE *SmmS3ResumeState;
791 UINT8 *Stacks;
792 VOID *Registration;
793 UINT32 RegEax;
794 UINT32 RegEdx;
795 UINTN FamilyId;
796 UINTN ModelId;
797 UINT32 Cr3;
798
799 //
800 // Initialize Debug Agent to support source level debug in SMM code
801 //
802 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
803
804 //
805 // Report the start of CPU SMM initialization.
806 //
807 REPORT_STATUS_CODE (
808 EFI_PROGRESS_CODE,
809 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
810 );
811
812 //
813 // Fix segment address of the long-mode-switch jump
814 //
815 if (sizeof (UINTN) == sizeof (UINT64)) {
816 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
817 }
818
819 //
820 // Find out SMRR Base and SMRR Size
821 //
822 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
823
824 //
825 // Get MP Services Protocol
826 //
827 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
828 ASSERT_EFI_ERROR (Status);
829
830 //
831 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
832 //
833 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
834 ASSERT_EFI_ERROR (Status);
835 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
836
837 //
838 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
839 // A constant BSP index makes no sense because the BSP may be hot removed.
840 //
841 DEBUG_CODE (
842 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
843
844 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
845 }
846 );
847
848 //
849 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
850 //
851 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
852 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
853
854 //
855 // If CPU hot plug is supported, allocate resources for processors that may be hot added
856 //
857 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
858 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
859 } else {
860 mMaxNumberOfCpus = mNumberOfCpus;
861 }
862 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
863
864 //
865 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
866 // allocated buffer. The minimum size of this buffer for a uniprocessor system
867 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
868 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
869 // then the SMI entry point and the CPU save state areas can be tiles to minimize
870 // the total amount SMRAM required for all the CPUs. The tile size can be computed
871 // by adding the // CPU save state size, any extra CPU specific context, and
872 // the size of code that must be placed at the SMI entry point to transfer
873 // control to a C function in the native SMM execution mode. This size is
874 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
875 // The total amount of memory required is the maximum number of CPUs that
876 // platform supports times the tile size. The picture below shows the tiling,
877 // where m is the number of tiles that fit in 32KB.
878 //
879 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
880 // | CPU m+1 Save State |
881 // +-----------------------------+
882 // | CPU m+1 Extra Data |
883 // +-----------------------------+
884 // | Padding |
885 // +-----------------------------+
886 // | CPU 2m SMI Entry |
887 // +#############################+ <-- Base of allocated buffer + 64 KB
888 // | CPU m-1 Save State |
889 // +-----------------------------+
890 // | CPU m-1 Extra Data |
891 // +-----------------------------+
892 // | Padding |
893 // +-----------------------------+
894 // | CPU 2m-1 SMI Entry |
895 // +=============================+ <-- 2^n offset from Base of allocated buffer
896 // | . . . . . . . . . . . . |
897 // +=============================+ <-- 2^n offset from Base of allocated buffer
898 // | CPU 2 Save State |
899 // +-----------------------------+
900 // | CPU 2 Extra Data |
901 // +-----------------------------+
902 // | Padding |
903 // +-----------------------------+
904 // | CPU m+1 SMI Entry |
905 // +=============================+ <-- Base of allocated buffer + 32 KB
906 // | CPU 1 Save State |
907 // +-----------------------------+
908 // | CPU 1 Extra Data |
909 // +-----------------------------+
910 // | Padding |
911 // +-----------------------------+
912 // | CPU m SMI Entry |
913 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
914 // | CPU 0 Save State |
915 // +-----------------------------+
916 // | CPU 0 Extra Data |
917 // +-----------------------------+
918 // | Padding |
919 // +-----------------------------+
920 // | CPU m-1 SMI Entry |
921 // +=============================+ <-- 2^n offset from Base of allocated buffer
922 // | . . . . . . . . . . . . |
923 // +=============================+ <-- 2^n offset from Base of allocated buffer
924 // | Padding |
925 // +-----------------------------+
926 // | CPU 1 SMI Entry |
927 // +=============================+ <-- 2^n offset from Base of allocated buffer
928 // | Padding |
929 // +-----------------------------+
930 // | CPU 0 SMI Entry |
931 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
932 //
933
934 //
935 // Retrieve CPU Family
936 //
937 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);
938 FamilyId = (RegEax >> 8) & 0xf;
939 ModelId = (RegEax >> 4) & 0xf;
940 if (FamilyId == 0x06 || FamilyId == 0x0f) {
941 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
942 }
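//
// Worked example of the extraction above (illustrative EAX value): with
// CPUID.01h:EAX = 0x000306C3,
//   FamilyId = (0x000306C3 >> 8) & 0xF = 0x6
//   ModelId  = (0x000306C3 >> 4) & 0xF = 0xC
// and since FamilyId is 0x06 the extended model bits are folded in:
//   ModelId  = 0xC | ((0x000306C3 >> 12) & 0xF0) = 0x3C
//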
943
944 //
945 // Determine the mode of the CPU at the time an SMI occurs
946 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
947 // Volume 3C, Section 34.4.1.1
948 //
949 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
950 if ((RegEdx & BIT29) != 0) {
951 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
952 }
953 if (FamilyId == 0x06) {
954 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
955 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
956 }
957 }
958
959 //
960 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
961 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size
962 // is rounded up to the nearest power of 2.
963 //
964 TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1;
965 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
966 DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize));
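//
// Illustrative sketch of the rounding above (hypothetical sizes): if
// sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize ()
// were 0x2400 bytes, then TileSize = 0x23FF, GetPowerOfTwo32 (0x23FF) = 0x2000, and
// 2 * 0x2000 = 0x4000, i.e. the smallest power of two not less than the sum.
//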
967
968 //
969 // If the TileSize is larger than the space available for the SMI Handler of CPU[i],
970 // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1], and the SMRAM Save State Map of CPU[i+1],
971 // then the ASSERT() below fails. If this ASSERT() is triggered, the SMI Handler size must be
972 // reduced.
973 //
974 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
975
976 //
977 // Allocate buffer for all of the tiles.
978 //
979 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
980 // Volume 3C, Section 34.11 SMBASE Relocation
981 // For Pentium and Intel486 processors, the SMBASE values must be
982 // aligned on a 32-KByte boundary or the processor will enter shutdown
983 // state during the execution of a RSM instruction.
984 //
985 // Intel486 processors: FamilyId is 4
986 // Pentium processors : FamilyId is 5
987 //
988 if ((FamilyId == 4) || (FamilyId == 5)) {
989 Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB);
990 } else {
991 Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));
992 }
993 ASSERT (Buffer != NULL);
994
995 //
996 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
997 //
998 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
999 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
1000
1001 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
1002 ASSERT (gSmmCpuPrivate->Operation != NULL);
1003
1004 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
1005 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
1006
1007 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
1008 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
1009
1010 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
1011 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
1012 mSmmCpuSaveState.CpuSaveState = (EFI_SMM_CPU_STATE **)gSmmCpuPrivate->CpuSaveState;
1013
1014 //
1015 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
1016 //
1017 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
1018 ASSERT (mCpuHotPlugData.ApicId != NULL);
1019 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
1020 ASSERT (mCpuHotPlugData.SmBase != NULL);
1021 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
1022
1023 //
1024 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
1025 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
1026 // size for each CPU in the platform
1027 //
1028 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1029 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
1030 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
1031 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
1032 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
1033
1034 if (Index < mNumberOfCpus) {
1035 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
1036 ASSERT_EFI_ERROR (Status);
1037 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
1038
1039 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
1040 Index,
1041 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
1042 mCpuHotPlugData.SmBase[Index],
1043 gSmmCpuPrivate->CpuSaveState[Index],
1044 gSmmCpuPrivate->CpuSaveStateSize[Index]
1045 ));
1046 } else {
1047 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
1048 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
1049 }
1050 }
1051
1052 //
1053 // Allocate SMI stacks for all processors.
1054 //
1055 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1056 //
1057 // Two more pages are allocated for each processor:
1058 // one is a guard page and the other is a known good stack.
1059 //
1060 // +-------------------------------------------+-----+-------------------------------------------+
1061 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
1062 // +-------------------------------------------+-----+-------------------------------------------+
1063 // | | | |
1064 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
1065 //
1066 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
1067 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
1068 ASSERT (Stacks != NULL);
1069 mSmmStackArrayBase = (UINTN)Stacks;
1070 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
1071 } else {
1072 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
1073 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
1074 ASSERT (Stacks != NULL);
1075 }
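//
// Illustrative sketch (assumed PCD value): with PcdCpuSmmStackSize = 0x2000 (2 pages)
// and PcdCpuSmmStackGuard enabled, each processor gets
//   mSmmStackSize = EFI_PAGES_TO_SIZE (2 + 2) = 0x4000
// laid out as in the diagram above: known good stack page, guard page, then the
// 2-page SMM stack. Without the stack guard, mSmmStackSize is simply 0x2000.
//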
1076
1077 //
1078 // Set SMI stack for SMM base relocation
1079 //
1080 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
1081
1082 //
1083 // Initialize IDT
1084 //
1085 InitializeSmmIdt ();
1086
1087 //
1088 // Relocate SMM Base addresses to the ones allocated from SMRAM
1089 //
1090 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
1091 ASSERT (mRebased != NULL);
1092 SmmRelocateBases ();
1093
1094 //
1095 // Call hook for BSP to perform extra actions in normal mode after all
1096 // SMM base addresses have been relocated on all CPUs
1097 //
1098 SmmCpuFeaturesSmmRelocationComplete ();
1099
1100 //
1101 // SMM Time initialization
1102 //
1103 InitializeSmmTimer ();
1104
1105 //
1106 // Initialize MP globals
1107 //
1108 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
1109
1110 //
1111 // Fill in SMM Reserved Regions
1112 //
1113 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1114 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1115
1116 //
1117 // Install the SMM Configuration Protocol onto a new handle in the handle database.
1118 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1119 // to an SMRAM address will be present in the handle database
1120 //
1121 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1122 &gSmmCpuPrivate->SmmCpuHandle,
1123 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
1124 NULL
1125 );
1126 ASSERT_EFI_ERROR (Status);
1127
1128 //
1129 // Install the SMM CPU Protocol into SMM protocol database
1130 //
1131 Status = gSmst->SmmInstallProtocolInterface (
1132 &mSmmCpuHandle,
1133 &gEfiSmmCpuProtocolGuid,
1134 EFI_NATIVE_INTERFACE,
1135 &mSmmCpu
1136 );
1137 ASSERT_EFI_ERROR (Status);
1138
1139 //
1140 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1141 //
1142 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1143 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1144 ASSERT_EFI_ERROR (Status);
1145 }
1146
1147 //
1148 // Initialize SMM CPU Services Support
1149 //
1150 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1151 ASSERT_EFI_ERROR (Status);
1152
1153 if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
1154 //
1155 // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility
1156 //
1157 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1158 &gSmmCpuPrivate->SmmCpuHandle,
1159 &gEfiSmmCpuSaveStateProtocolGuid,
1160 &mSmmCpuSaveState,
1161 NULL
1162 );
1163 ASSERT_EFI_ERROR (Status);
1164 //
1165 // The SmmStartupThisAp service in Framework SMST should always be non-null.
1166 // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk
1167 // can have it ready when constructing Framework SMST.
1168 //
1169 gSmst->SmmStartupThisAp = SmmStartupThisAp;
1170 }
1171
1172 //
1173 // Register SMM Ready To Lock Protocol notification
1174 //
1175 Status = gSmst->SmmRegisterProtocolNotify (
1176 &gEfiSmmReadyToLockProtocolGuid,
1177 SmmReadyToLockEventNotify,
1178 &Registration
1179 );
1180 ASSERT_EFI_ERROR (Status);
1181
1182 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
1183 if (GuidHob != NULL) {
1184 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
1185
1186 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
1187 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));
1188
1189 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
1190 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));
1191
1192 mSmmS3ResumeState = SmmS3ResumeState;
1193 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;
1194
1195 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;
1196
1197 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
1198 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
1199 if (SmmS3ResumeState->SmmS3StackBase == 0) {
1200 SmmS3ResumeState->SmmS3StackSize = 0;
1201 }
1202
1203 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
1204 SmmS3ResumeState->SmmS3Cr3 = Cr3;
1205 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;
1206
1207 if (sizeof (UINTN) == sizeof (UINT64)) {
1208 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
1209 }
1210 if (sizeof (UINTN) == sizeof (UINT32)) {
1211 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
1212 }
1213 }
1214
1215 //
1216 // Check XD and BTS features
1217 //
1218 CheckProcessorFeature ();
1219
1220 //
1221 // Initialize SMM Profile feature
1222 //
1223 InitSmmProfile (Cr3);
1224
1225 //
1226 // Patch SmmS3ResumeState->SmmS3Cr3
1227 //
1228 InitSmmS3Cr3 ();
1229
1230 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1231
1232 return EFI_SUCCESS;
1233 }
1234
1235 /**
1236
1237 Find out SMRAM information including SMRR base and SMRR size.
1238
1239 @param SmrrBase SMRR base
1240 @param SmrrSize SMRR size
1241
1242 **/
1243 VOID
1244 FindSmramInfo (
1245 OUT UINT32 *SmrrBase,
1246 OUT UINT32 *SmrrSize
1247 )
1248 {
1249 EFI_STATUS Status;
1250 UINTN Size;
1251 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
1252 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1253 EFI_SMRAM_DESCRIPTOR *SmramRanges;
1254 UINTN SmramRangeCount;
1255 UINTN Index;
1256 UINT64 MaxSize;
1257 BOOLEAN Found;
1258
1259 //
1260 // Get SMM Access Protocol
1261 //
1262 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
1263 ASSERT_EFI_ERROR (Status);
1264
1265 //
1266 // Get SMRAM information
1267 //
1268 Size = 0;
1269 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
1270 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
1271
1272 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
1273 ASSERT (SmramRanges != NULL);
1274
1275 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);
1276 ASSERT_EFI_ERROR (Status);
1277
1278 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1279
1280 //
1281 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1282 //
1283 CurrentSmramRange = NULL;
1284 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {
1285 //
1286 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1287 //
1288 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1289 continue;
1290 }
1291
1292 if (SmramRanges[Index].CpuStart >= BASE_1MB) {
1293 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {
1294 if (SmramRanges[Index].PhysicalSize >= MaxSize) {
1295 MaxSize = SmramRanges[Index].PhysicalSize;
1296 CurrentSmramRange = &SmramRanges[Index];
1297 }
1298 }
1299 }
1300 }
1301
1302 ASSERT (CurrentSmramRange != NULL);
1303
1304 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1305 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1306
1307 do {
1308 Found = FALSE;
1309 for (Index = 0; Index < SmramRangeCount; Index++) {
1310 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {
1311 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;
1312 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
1313 Found = TRUE;
1314 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {
1315 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
1316 Found = TRUE;
1317 }
1318 }
1319 } while (Found);
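//
// Illustrative sketch (hypothetical descriptors): if the largest open range found
// above is [0x7F000000, 0x7F800000), a neighboring descriptor covering
// [0x7E800000, 0x7F000000) extends the result downward to *SmrrBase = 0x7E800000,
// *SmrrSize = 0x1000000, and a descriptor starting right at the new top would extend
// it upward the same way; the loop repeats until no adjacent descriptor remains.
//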
1320
1321 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1322 }
1323
1324 /**
1325 Configure SMM Code Access Check feature on an AP.
1326 SMM Feature Control MSR will be locked after configuration.
1327
1328 @param[in,out] Buffer Pointer to private data buffer.
1329 **/
1330 VOID
1331 EFIAPI
1332 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1333 IN OUT VOID *Buffer
1334 )
1335 {
1336 UINTN CpuIndex;
1337 UINT64 SmmFeatureControlMsr;
1338 UINT64 NewSmmFeatureControlMsr;
1339
1340 //
1341 // Retrieve the CPU Index from the context passed in
1342 //
1343 CpuIndex = *(UINTN *)Buffer;
1344
1345 //
1346 // Get the current SMM Feature Control MSR value
1347 //
1348 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1349
1350 //
1351 // Compute the new SMM Feature Control MSR value
1352 //
1353 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1354 if (mSmmCodeAccessCheckEnable) {
1355 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1356 }
1357 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1358 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1359 }
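//
// Illustrative sketch (bit assignments assumed from this module's header, following the
// SMM Feature Control MSR layout): with SMM_FEATURE_CONTROL_LOCK_BIT = BIT0 and
// SMM_CODE_CHK_EN_BIT = BIT2, enabling and locking the feature on a cleared MSR gives
//   NewSmmFeatureControlMsr = 0 | BIT2 | BIT0 = 0x5
//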
1360
1361 //
1362 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1363 //
1364 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1365 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1366 }
1367
1368 //
1369 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1370 //
1371 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);
1372 }
1373
1374 /**
1375 Configure SMM Code Access Check feature for all processors.
1376 SMM Feature Control MSR will be locked after configuration.
1377 **/
1378 VOID
1379 ConfigSmmCodeAccessCheck (
1380 VOID
1381 )
1382 {
1383 UINTN Index;
1384 EFI_STATUS Status;
1385
1386 //
1387 // Check to see if the Feature Control MSR is supported on this CPU
1388 //
1389 Index = gSmst->CurrentlyExecutingCpu;
1390 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1391 mSmmCodeAccessCheckEnable = FALSE;
1392 return;
1393 }
1394
1395 //
1396 // Check to see if the CPU supports the SMM Code Access Check feature
1397 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1398 //
1399 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1400 mSmmCodeAccessCheckEnable = FALSE;
1401 }
1402
1403 //
1404 // If the SMM Code Access Check feature is disabled and the Feature Control MSR
1405 // is not being locked, then no additional work is required
1406 //
1407 if (!mSmmCodeAccessCheckEnable && !FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1408 return;
1409 }
1410
1411 //
1412 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1413 //
1414 InitializeSpinLock (&mConfigSmmCodeAccessCheckLock);
1415
1416 //
1417 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1418 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1419 //
1420 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);
1421
1422 //
1423 // Enable SMM Code Access Check feature on the BSP.
1424 //
1425 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1426
1427 //
1428 // Enable SMM Code Access Check feature for the APs.
1429 //
1430 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1431 if (Index != gSmst->CurrentlyExecutingCpu) {
1432
1433 //
1434 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1435 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1436 //
1437 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);
1438
1439 //
1440 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1441 //
1442 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1443 ASSERT_EFI_ERROR (Status);
1444
1445 //
1446 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1447 //
1448 while (!AcquireSpinLockOrFail (&mConfigSmmCodeAccessCheckLock)) {
1449 CpuPause ();
1450 }
1451
1452 //
1453 // Release the Config SMM Code Access Check spin lock.
1454 //
1455 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);
1456 }
1457 }
1458 }
1459
1460 /**
1461 Perform the remaining tasks.
1462
1463 **/
1464 VOID
1465 PerformRemainingTasks (
1466 VOID
1467 )
1468 {
1469 if (mSmmReadyToLock) {
1470 //
1471 // Start SMM Profile feature
1472 //
1473 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1474 SmmProfileStart ();
1475 }
1476 //
1477 // Create a mix of 2 MB and 4 KB page tables. Mark some memory ranges as not present or execute-disable.
1478 //
1479 InitPaging ();
1480 //
1481 // Configure SMM Code Access Check feature if available.
1482 //
1483 ConfigSmmCodeAccessCheck ();
1484
1485 //
1486 // Clear the SMM ready to lock flag
1487 //
1488 mSmmReadyToLock = FALSE;
1489 }
1490 }