//
// Source: mirror of TianoCore edk2, UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
// (commit: "UefiCpuPkg/PiSmmCpuDxeSmm: fix S3 Resume for CPU hotplug")
//
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6
7 **/
8
9 #include "PiSmmCpuDxeSmm.h"
10
//
// Data exchanged between the BSP and the APs during S3 wakeup. This block is
// placed immediately after the AP startup (rendezvous) code below 1 MB and is
// consumed by the assembly startup stub, so the layout is packed and must
// match the assembly side exactly — do not reorder or resize fields.
//
#pragma pack(1)
typedef struct {
  UINTN             Lock;                               // Spinlock used by APs to serialize stack carve-out
  VOID              *StackStart;                        // Base of the stack region shared out to APs
  UINTN             StackSize;                          // Per-AP stack size
  VOID              *ApFunction;                        // C function each AP jumps to after mode switch
  IA32_DESCRIPTOR   GdtrProfile;                        // GDTR to load on the AP
  IA32_DESCRIPTOR   IdtrProfile;                        // IDTR to load on the AP
  UINT32            BufferStart;                        // Physical address of the startup-code buffer
  UINT32            Cr3;                                // Page-table base for APs entering paged mode
  UINTN             InitializeFloatingPointUnitsAddress; // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
24
//
// Address/offset map of the assembly AP rendezvous code, filled in by
// AsmGetAddressMap(). Offsets are relative to RendezvousFunnelAddress and are
// used to relocate the code and patch its mode-switch jump targets.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;   // Start of the AP startup code in the driver image
  UINTN PModeEntryOffset;           // Offset of the 32-bit protected-mode entry point
  UINTN FlatJumpOffset;             // Offset of the far jump into protected mode (patched)
  UINTN Size;                       // Total size of the startup code to copy
  UINTN LModeEntryOffset;           // Offset of the long-mode entry point (X64 only)
  UINTN LongJumpOffset;             // Offset of the far jump into long mode (0 if not present)
} MP_ASSEMBLY_ADDRESS_MAP;
33
//
// Flags used when program the register.
// Shared synchronization state used by ProgramProcessorRegister() when it is
// executed concurrently on the BSP and all APs.
//
typedef struct {
  volatile UINTN           MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32          *CoreSemaphoreCount;     // Semaphore container used to program
                                                    // core level semaphore.
  volatile UINT32          *PackageSemaphoreCount;  // Semaphore container used to program
                                                    // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
44
45 //
46 // Signal that SMM BASE relocation is complete.
47 //
48 volatile BOOLEAN mInitApsAfterSmmBaseReloc;
49
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  Implemented in assembly (per-arch); fills in the offsets consumed by
  PrepareApStartupVector() below.

  @param AddressMap  Output buffer for address map information.

  @return The return value is not used by callers in this file; only the
          filled-in AddressMap is consumed.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

//
// 8 KB region just below the legacy VGA window at 0xA0000.
// NOTE(review): not referenced in this chunk of the file — presumably used
// elsewhere for placing the below-1MB AP startup buffer; confirm before removal.
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
64
PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;              // Semaphores/locks for register programming
ACPI_CPU_DATA                mAcpiCpuData;           // SMRAM copy of the CPU S3 data (see GetAcpiCpuData)
volatile UINT32              mNumberToFinish;        // Count of APs still working; BSP spins until 0
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;         // BSP<->AP exchange area after the startup code
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;  // Set on S3 to re-run SMM config restore

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;   // Mirrors PcdAcpiS3Enable; gates all S3 work here

//
// ACPI-NVS buffer holding a copy of the hlt-loop stub below, where APs park
// after S3 initialization completes (safe across OS ownership of memory).
//
UINT8                        *mApHltLoopCode = NULL;
//
// 32-bit machine code: decrement the counter whose address is at [esp+4],
// then cli/hlt forever. TransferApToSafeState() pushes &mNumberToFinish.
//
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };
91
/**
  Sync up the MTRR values for all processors.

  Applies the fixed and variable MTRR settings saved during normal boot to the
  calling processor. Called on the BSP and on every AP during S3 resume.

  @param MtrrTable  Physical address of an MTRR_SETTINGS structure holding the
                    fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
120
/**
  Increment semaphore by 1.

  Thin MP-safe wrapper over InterlockedIncrement; pairs with
  S3WaitForSemaphore() below.

  @param Sem  IN: 32-bit unsigned integer semaphore to release.
**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  InterlockedIncrement (Sem);
}
134
135 /**
136 Decrement the semaphore by 1 if it is not zero.
137
138 Performs an atomic decrement operation for semaphore.
139 The compare exchange operation must be performed using
140 MP safe mechanisms.
141
142 @param Sem IN: 32-bit unsigned integer
143
144 **/
145 VOID
146 S3WaitForSemaphore (
147 IN OUT volatile UINT32 *Sem
148 )
149 {
150 UINT32 Value;
151
152 do {
153 Value = *Sem;
154 } while (Value == 0 ||
155 InterlockedCompareExchange32 (
156 Sem,
157 Value,
158 Value - 1
159 ) != Value);
160 }
161
162 /**
163 Read / write CR value.
164
165 @param[in] CrIndex The CR index which need to read/write.
166 @param[in] Read Read or write. TRUE is read.
167 @param[in,out] CrValue CR value.
168
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
170 **/
171 UINTN
172 ReadWriteCr (
173 IN UINT32 CrIndex,
174 IN BOOLEAN Read,
175 IN OUT UINTN *CrValue
176 )
177 {
178 switch (CrIndex) {
179 case 0:
180 if (Read) {
181 *CrValue = AsmReadCr0 ();
182 } else {
183 AsmWriteCr0 (*CrValue);
184 }
185 break;
186 case 2:
187 if (Read) {
188 *CrValue = AsmReadCr2 ();
189 } else {
190 AsmWriteCr2 (*CrValue);
191 }
192 break;
193 case 3:
194 if (Read) {
195 *CrValue = AsmReadCr3 ();
196 } else {
197 AsmWriteCr3 (*CrValue);
198 }
199 break;
200 case 4:
201 if (Read) {
202 *CrValue = AsmReadCr4 ();
203 } else {
204 AsmWriteCr4 (*CrValue);
205 }
206 break;
207 default:
208 return EFI_UNSUPPORTED;;
209 }
210
211 return EFI_SUCCESS;
212 }
213
/**
  Initialize the CPU registers from a register table.

  Walks every entry of the given table and applies it according to its type:
  control registers, MSRs (optionally test-then-write), MMIO bit fields
  (serialized by a spinlock), cache enable/disable, and cross-thread
  semaphores used to rendezvous threads at core or package scope before
  continuing with later entries.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        //
        // Unsupported CR index: skip this entry rather than fault.
        //
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        //
        // Skip the write when the targeted bit field already holds the
        // desired value (avoids redundant CR writes / serializing events).
        //
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes are serialized across all processors with a spinlock since
      // multiple threads may target the same device register.
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If driver maintail clearly
        // cores number in different packages, the logic will be much complicated.
        // Here driver just simply records the max core number in all packages and use it as expect
        // core number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
454
455 /**
456
457 Set Processor register for one AP.
458
459 @param PreSmmRegisterTable Use pre Smm register table or register table.
460
461 **/
462 VOID
463 SetRegister (
464 IN BOOLEAN PreSmmRegisterTable
465 )
466 {
467 CPU_REGISTER_TABLE *RegisterTable;
468 CPU_REGISTER_TABLE *RegisterTables;
469 UINT32 InitApicId;
470 UINTN ProcIndex;
471 UINTN Index;
472
473 if (PreSmmRegisterTable) {
474 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
475 } else {
476 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
477 }
478
479 InitApicId = GetInitialApicId ();
480 RegisterTable = NULL;
481 ProcIndex = (UINTN)-1;
482 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
483 if (RegisterTables[Index].InitialApicId == InitApicId) {
484 RegisterTable = &RegisterTables[Index];
485 ProcIndex = Index;
486 break;
487 }
488 }
489 ASSERT (RegisterTable != NULL);
490
491 if (mAcpiCpuData.ApLocation != 0) {
492 ProgramProcessorRegister (
493 RegisterTable,
494 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
495 &mAcpiCpuData.CpuStatus,
496 &mCpuFlags
497 );
498 } else {
499 ProgramProcessorRegister (
500 RegisterTable,
501 NULL,
502 &mAcpiCpuData.CpuStatus,
503 &mCpuFlags
504 );
505 }
506 }
507
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Runs on every AP during S3 resume: restores MTRRs and pre-SMM registers,
  signals completion, waits for the BSP to finish SMBASE relocation, restores
  the remaining registers, and finally parks the AP in the hlt-loop stub in
  ACPI NVS.  This function never returns.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Apply the pre-SMM-init register table first (TRUE).
  //
  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  //
  // Apply the post-relocation register table (FALSE).
  //
  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
548
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: copies the AP rendezvous
  code to WorkingBuffer, patches its mode-switch far jumps to point into the
  relocated copy, and fills the MP_CPU_EXCHANGE_INFO block placed right after
  the code.

  @param  WorkingBuffer     The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The "+ 3" / "+ 2" below skip to the 32-bit immediate operand inside the
  // respective far-jump instruction of the assembly stub — these constants
  // must stay in sync with the assembly implementation behind AsmGetAddressMap.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // Long-mode jump only exists in the X64 build of the stub.
    //
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
598
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.  It wakes all APs with INIT-SIPI-SIPI
  into InitializeAp() and waits until every AP has finished its pre-relocation
  work (mNumberToFinish reaches zero).
**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // With hot-plug support the saved CPU count is an upper bound on the
  // currently present CPUs; otherwise the counts must match exactly.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
641
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.  The APs are already
  spinning in InitializeAp() waiting for mInitApsAfterSmmBaseReloc.
**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // APs decrement mNumberToFinish from the hlt-loop stub once they are parked.
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
681
682 /**
683 Restore SMM Configuration in S3 boot path.
684
685 **/
686 VOID
687 RestoreSmmConfigurationInS3 (
688 VOID
689 )
690 {
691 if (!mAcpiS3Enable) {
692 return;
693 }
694
695 //
696 // Restore SMM Configuration in S3 boot path.
697 //
698 if (mRestoreSmmConfigurationInS3) {
699 //
700 // Need make sure gSmst is correct because below function may use them.
701 //
702 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
703 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
704 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
705 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
706 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
707
708 //
709 // Configure SMM Code Access Check feature if available.
710 //
711 ConfigSmmCodeAccessCheck ();
712
713 SmmCpuFeaturesCompleteSmmReadyToLock ();
714
715 mRestoreSmmConfigurationInS3 = FALSE;
716 }
717 }
718
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
  Sequence: (optional) switch to a temporary X64 IDT, restore CPU state before
  SMBASE relocation, relocate SMBASE for all CPUs, restore remaining state,
  then return to the PEI S3 resume entry point — via SwitchStack() for 32-bit
  SMM or AsmDisablePaging64() for 64-bit SMM.  This function does not return.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table.  Only the first 32 (architectural exception)
    // vectors are populated here.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
855
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the ACPI variable HOB, builds the SMM_S3_RESUME_STATE in SMRAM that
  the PEI S3 resume code hands control back through (entry point, stack, CRs,
  signature), and allocates the below-4GB ACPI-NVS buffer for the AP hlt-loop
  stub.

  @param[in] Cr3    The base address of the page tables to use in SMM.
**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB the S3 resume handshake cannot be set up; halting here
    // is preferred over an undebuggable failure during an actual resume.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    //
    // Allocation failure is tolerated by recording a zero stack size.
    //
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Signature records the SMM execution mode so SmmRestoreCpu() knows how
    // to return to PEI.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
937
938 /**
939 Copy register table from ACPI NVS memory into SMRAM.
940
941 @param[in] DestinationRegisterTableList Points to destination register table.
942 @param[in] SourceRegisterTableList Points to source register table.
943 @param[in] NumberOfCpus Number of CPUs.
944
945 **/
946 VOID
947 CopyRegisterTable (
948 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
949 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
950 IN UINT32 NumberOfCpus
951 )
952 {
953 UINTN Index;
954 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
955
956 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
957 for (Index = 0; Index < NumberOfCpus; Index++) {
958 if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
959 RegisterTableEntry = AllocateCopyPool (
960 DestinationRegisterTableList[Index].AllocatedSize,
961 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
962 );
963 ASSERT (RegisterTableEntry != NULL);
964 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
965 }
966 }
967 }
968
969 /**
970 Get ACPI CPU data.
971
972 **/
973 VOID
974 GetAcpiCpuData (
975 VOID
976 )
977 {
978 ACPI_CPU_DATA *AcpiCpuData;
979 IA32_DESCRIPTOR *Gdtr;
980 IA32_DESCRIPTOR *Idtr;
981 VOID *GdtForAp;
982 VOID *IdtForAp;
983 VOID *MachineCheckHandlerForAp;
984 CPU_STATUS_INFORMATION *CpuStatus;
985
986 if (!mAcpiS3Enable) {
987 return;
988 }
989
990 //
991 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
992 //
993 mAcpiCpuData.NumberOfCpus = 0;
994
995 //
996 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
997 //
998 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
999 if (AcpiCpuData == 0) {
1000 return;
1001 }
1002
1003 //
1004 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1005 //
1006 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
1007
1008 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
1009 ASSERT (mAcpiCpuData.MtrrTable != 0);
1010
1011 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
1012
1013 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1014 ASSERT (mAcpiCpuData.GdtrProfile != 0);
1015
1016 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
1017
1018 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1019 ASSERT (mAcpiCpuData.IdtrProfile != 0);
1020
1021 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
1022
1023 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1024 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
1025
1026 CopyRegisterTable (
1027 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
1028 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
1029 mAcpiCpuData.NumberOfCpus
1030 );
1031
1032 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1033 ASSERT (mAcpiCpuData.RegisterTable != 0);
1034
1035 CopyRegisterTable (
1036 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
1037 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
1038 mAcpiCpuData.NumberOfCpus
1039 );
1040
1041 //
1042 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1043 //
1044 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
1045 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
1046
1047 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
1048 ASSERT (GdtForAp != NULL);
1049 IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
1050 MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));
1051
1052 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
1053 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
1054 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
1055
1056 Gdtr->Base = (UINTN)GdtForAp;
1057 Idtr->Base = (UINTN)IdtForAp;
1058 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
1059
1060 CpuStatus = &mAcpiCpuData.CpuStatus;
1061 CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1062 if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
1063 CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1064 sizeof (UINT32) * CpuStatus->PackageCount,
1065 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
1066 );
1067 ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
1068 }
1069 if (AcpiCpuData->ApLocation != 0) {
1070 mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1071 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1072 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
1073 );
1074 ASSERT (mAcpiCpuData.ApLocation != 0);
1075 }
1076 if (CpuStatus->PackageCount != 0) {
1077 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
1078 sizeof (UINT32) * CpuStatus->PackageCount *
1079 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1080 );
1081 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
1082 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
1083 sizeof (UINT32) * CpuStatus->PackageCount *
1084 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1085 );
1086 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
1087 }
1088 InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
1089 }
1090
/**
  Get ACPI S3 enable flag.

  Caches PcdAcpiS3Enable into mAcpiS3Enable, which gates all S3 support in
  this file.
**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}