/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN            Lock;
  VOID             *StackStart;
  UINTN            StackSize;
  VOID             *ApFunction;
  IA32_DESCRIPTOR  GdtrProfile;
  IA32_DESCRIPTOR  IdtrProfile;
  UINT32           BufferStart;
  UINT32           Cr3;
  UINTN            InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the registers.
//
typedef struct {
  volatile UINTN   MemoryMappedLock;         // Spinlock used to program MMIO
  volatile UINT32  *CoreSemaphoreCount;      // Semaphore container used to program
                                             // core level semaphore.
  volatile UINT32  *PackageSemaphoreCount;   // Semaphore container used to program
                                             // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;
ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;

UINT8  *mApHltLoopCode = NULL;
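//
// Template for the AP hlt loop that runs from low memory on the S3 path.
// TransferApToSafeState() passes the address of mNumberToFinish on the
// stack; the code below atomically decrements that counter and then parks
// the AP in a cli/hlt loop.
//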
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04, // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,       // lock dec  dword ptr [eax]
  0xFA,                   // cli
  0xF4,                   // hlt
  0xEB, 0xFC              // jmp $-2
};

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  Pointer to the 32-bit semaphore count.

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Wait until the semaphore is non-zero, then decrement it by 1.

  Performs an atomic decrement operation on the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  Pointer to the 32-bit semaphore count.

**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

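  //
  // Spin until the count is non-zero, then claim one unit with an atomic
  // compare-exchange; retry if another processor changed the count first.
  //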
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}

/**
  Read / write CR value.

  @param[in]      CrIndex  The index of the CR to read or write.
  @param[in]      Read     Read or write. TRUE is read.
  @param[in,out]  CrValue  CR value.

  @retval EFI_SUCCESS      The CR was read or written successfully.
  @retval EFI_UNSUPPORTED  The CR index is not supported.
**/
EFI_STATUS
ReadWriteCr (
  IN UINT32      CrIndex,
  IN BOOLEAN     Read,
  IN OUT UINTN   *CrValue
  )
{
  switch (CrIndex) {
    case 0:
      if (Read) {
        *CrValue = AsmReadCr0 ();
      } else {
        AsmWriteCr0 (*CrValue);
      }

      break;
    case 2:
      if (Read) {
        *CrValue = AsmReadCr2 ();
      } else {
        AsmWriteCr2 (*CrValue);
      }

      break;
    case 3:
      if (Read) {
        *CrValue = AsmReadCr3 ();
      } else {
        AsmWriteCr3 (*CrValue);
      }

      break;
    case 4:
      if (Read) {
        *CrValue = AsmReadCr4 ();
      } else {
        AsmWriteCr4 (*CrValue);
      }

      break;
    default:
      return EFI_UNSUPPORTED;
  }

  return EFI_SUCCESS;
}

/**
  Initialize the CPU registers from a register table.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this AP.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when programming the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {
    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
        if (EFI_ERROR (Status)) {
          break;
        }

        if (RegisterTableEntry->TestThenWrite) {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }

        Value = (UINTN)BitFieldWrite64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                         RegisterTableEntry->Value
                         );
        ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->TestThenWrite) {
          Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
          if (RegisterTableEntry->ValidBitLength >= 64) {
            if (Value == RegisterTableEntry->Value) {
              break;
            }
          } else {
            CurrentValue = BitFieldRead64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                             );
            if (CurrentValue == RegisterTableEntry->Value) {
              break;
            }
          }
        }

        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }

        break;
      //
      // MemoryMapped operations
      //
      case MemoryMapped:
        AcquireSpinLock (&CpuFlags->MemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }

        break;

      case Semaphore:
        // The semaphore logic works as below:
        //
        // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
        // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
        //
        // All threads (T0...Tn) wait at the P() line and then continue
        // running together.
        //
        //
        // T0             T1            ...           Tn
        //
        // V(0...n)       V(0...n)      ...           V(0...n)
        // n * P(0)       n * P(1)      ...           n * P(n)
        //
        ASSERT (
          (ApLocation != NULL) &&
          (CpuStatus->ThreadCountPerPackage != 0) &&
          (CpuStatus->ThreadCountPerCore != 0) &&
          (CpuFlags->CoreSemaphoreCount != NULL) &&
          (CpuFlags->PackageSemaphoreCount != NULL)
          );
        switch (RegisterTableEntry->Value) {
          case CoreDepType:
            SemaphorePtr       = CpuFlags->CoreSemaphoreCount;
            ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

            CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
            //
            // Get the offset of the first thread in the core which the current thread belongs to.
            //
            FirstThread   = CurrentCore * CpuStatus->MaxThreadCount;
            CurrentThread = FirstThread + ApLocation->Thread;

            //
            // Different cores may have different numbers of valid threads.
            // Tracking exact thread indexes per core would make the logic much
            // more complicated, so the driver simply records the maximum thread
            // count across all cores and uses it as the expected thread count
            // for every core.
            // In the two steps below, the current thread first releases the
            // semaphore for every thread slot in its core; some slots may not
            // hold valid threads, but the driver does not care. Second, the
            // current thread waits on the semaphore once per valid thread in
            // its core, because only the valid threads release the semaphore
            // for this thread.
            //

            //
            // First, notify ALL THREADs in the current core that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether all VALID THREADs (not all threads) in the current core are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          case PackageDepType:
            SemaphorePtr          = CpuFlags->PackageSemaphoreCount;
            ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
            //
            // Get the offset of the first thread in the package which the current thread belongs to.
            //
            FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
            //
            // Compute the index of the current thread within the package.
            //
            CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

            //
            // Different packages may have different numbers of valid threads.
            // Tracking exact thread indexes per package would make the logic
            // much more complicated, so the driver simply records the maximum
            // thread count across all packages and uses it as the expected
            // thread count for every package.
            // In the two steps below, the current thread first releases the
            // semaphore for every thread slot in its package; some slots may
            // not hold valid threads, but the driver does not care. Second,
            // the current thread waits on the semaphore once per valid thread
            // in its package, because only the valid threads release the
            // semaphore for this thread.
            //

            //
            // First, notify ALL THREADS in the current package that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether all VALID THREADS (not all threads) in the current package are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          default:
            break;
        }

        break;

      default:
        break;
    }
  }
}

/**
  Set processor registers for one AP.

  @param PreSmmRegisterTable  TRUE to use the pre-SMM init register table;
                              FALSE to use the normal register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_FEATURE_INIT_DATA  *FeatureInitData;
  CPU_REGISTER_TABLE     *RegisterTable;
  CPU_REGISTER_TABLE     *RegisterTables;
  UINT32                 InitApicId;
  UINTN                  ProcIndex;
  UINTN                  Index;

  FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
  }

  if (RegisterTables == NULL) {
    return;
  }

  InitApicId    = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex     = (UINTN)-1;
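  //
  // Locate the register table for this processor by matching its initial APIC ID.
  //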
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex     = Index;
      break;
    }
  }

  ASSERT (RegisterTable != NULL);

  if (FeatureInitData->ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
      &FeatureInitData->CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &FeatureInitData->CpuStatus,
      &mCpuFlags
      );
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
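  //
  // The 128-byte local array serves as a temporary stack; align its top
  // down to CPU_STACK_ALIGNMENT before handing it to the hlt-loop code.
  //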
  TopOfStack  = (UINTN)Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares the startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
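  //
  // Note: the +3 / +2 byte offsets are assumed to skip the far-jump opcode
  // (and prefix) bytes so that the 32-bit target operand of each jump is
  // patched in place.
  //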
  *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
  ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart                          = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize                           = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart                         = (UINT32)StartupVector;
  mExchangeInfo->Cr3                                 = (UINT32)(AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  It performs the first-time microcode load and restores MTRRs for both the BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  mNumberToFinish           = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  It restores configuration for both the BSP and APs according to the data saved
  by the normal boot path.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Register programming must begin only after all APs have continued their
  // initialization. This is a requirement of the semaphore mechanism in the
  // register table: if a semaphore has package-level dependency, it waits for
  // all APs in one package to finish their tasks before the next register is
  // set for all APs. If the APs had not begun their tasks while the BSP ran
  // its own, the BSP thread would hang waiting for the other APs in the same
  // package to finish.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);

    //
    // Setup X64 IDT table (covering only the 32 architectural exception vectors)
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN)IdtEntryTable;
    X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }

    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *)(UINTN)Address;
}

/**
  Copy register table from non-SMRAM into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
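  //
  // The shallow copy above duplicates only the per-CPU table headers; deep-
  // copy each non-empty entry list into SMRAM as well.
  //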
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].TableLength != 0) {
      DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Check whether the register table is empty or not.

  @param[in] RegisterTable  Point to the register table.
  @param[in] NumberOfCpus   Number of CPUs.

  @retval TRUE   The register table is empty.
  @retval FALSE  The register table is not empty.
**/
BOOLEAN
IsRegisterTableEmpty (
  IN CPU_REGISTER_TABLE  *RegisterTable,
  IN UINT32              NumberOfCpus
  )
{
  UINTN  Index;

  if (RegisterTable != NULL) {
    for (Index = 0; Index < NumberOfCpus; Index++) {
      if (RegisterTable[Index].TableLength != 0) {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Copy the data used to initialize processor register into SMRAM.

  @param[in,out] CpuFeatureInitDataDst  Pointer to the destination CPU_FEATURE_INIT_DATA structure.
  @param[in]     CpuFeatureInitDataSrc  Pointer to the source CPU_FEATURE_INIT_DATA structure.

**/
VOID
CopyCpuFeatureInitDatatoSmram (
  IN OUT CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataDst,
  IN     CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataSrc
  )
{
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
  CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
    CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                      sizeof (UINT32) * CpuStatus->PackageCount,
                                                                      (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
                                                                      );
    ASSERT (CpuStatus->ThreadCountPerPackage != 0);
  }

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
    CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                   sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
                                                                   (UINT8 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
                                                                   );
    ASSERT (CpuStatus->ThreadCountPerCore != 0);
  }

  if (CpuFeatureInitDataSrc->ApLocation != 0) {
    CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                       mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                                                       (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
                                                                       );
    ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

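  //
  // A single pool allocation holds the GDT, the IDT, and the machine check
  // handler back to back; the interior pointers are computed from the limits.
  //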
  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp                 = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base                             = (UINTN)GdtForAp;
  Idtr->Base                             = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));

  if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
    //
    // If the CPU features will not be initialized by CpuFeaturesPei module during
    // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
    // which will be consumed in SmmRestoreCpu during next S3 resume.
    //
    CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);

    CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;

    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);

    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);

    InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
  }
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}