]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg: Change use of EFI_D_* to DEBUG_*
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6
7 **/
8
9 #include "PiSmmCpuDxeSmm.h"
10
#pragma pack(1)
//
// Data exchanged between the BSP and the APs during S3 AP startup.
// PrepareApStartupVector() places this structure immediately after the
// copied AP startup code and fills it in; the structure is byte-packed,
// so field order and sizes must not change (presumably consumed by the
// AP startup assembly — confirm against MpFuncs.nasm).
//
typedef struct {
  UINTN             Lock;                                // Lock word (not referenced in this file's visible code)
  VOID              *StackStart;                         // Base of the AP stack area (mAcpiCpuData.StackAddress)
  UINTN             StackSize;                           // Stack size available per AP
  VOID              *ApFunction;                         // C entry each AP calls after init (set to InitializeAp)
  IA32_DESCRIPTOR   GdtrProfile;                         // GDTR value for APs, copied from mAcpiCpuData
  IA32_DESCRIPTOR   IdtrProfile;                         // IDTR value for APs, copied from mAcpiCpuData
  UINT32            BufferStart;                         // Physical address of the AP startup vector buffer
  UINT32            Cr3;                                 // Page-table base (BSP's CR3 at preparation time)
  UINTN             InitializeFloatingPointUnitsAddress; // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
24
//
// Address map of the AP rendezvous (startup) code, returned by
// AsmGetAddressMap(). PrepareApStartupVector() copies Size bytes from
// RendezvousFunnelAddress and patches the far jumps at FlatJumpOffset
// and LongJumpOffset to target the relocated PModeEntry/LModeEntry.
//
typedef struct {
  UINT8   *RendezvousFunnelAddress;  // Start of the AP startup code template
  UINTN   PModeEntryOffset;          // Offset of the protected-mode entry point
  UINTN   FlatJumpOffset;            // Offset of the jump patched to reach PModeEntry
  UINTN   Size;                      // Total size of the startup code to copy
  UINTN   LModeEntryOffset;          // Offset of the long-mode entry point
  UINTN   LongJumpOffset;            // Offset of the jump patched to reach LModeEntry (0 if none)
} MP_ASSEMBLY_ADDRESS_MAP;
33
//
// Flags used when program the register.
// Shared by all processors executing ProgramProcessorRegister(): the spin
// lock serializes MMIO writes, and the semaphore arrays implement core- and
// package-scope barriers for Semaphore-type register table entries.
//
typedef struct {
  volatile UINTN    MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32   *CoreSemaphoreCount;     // Semaphore container used to program
                                             // core level semaphore.
  volatile UINT32   *PackageSemaphoreCount;  // Semaphore container used to program
                                             // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
44
//
// Signal that SMM BASE relocation is complete.
// Written by the BSP (InitializeCpuBeforeRebase/InitializeCpuAfterRebase),
// polled by APs in InitializeAp().
//
volatile BOOLEAN             mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

//
// 8 KB region just below 0xA0000. Not referenced in this file's visible
// code — presumably used elsewhere in the driver for real-mode AP startup;
// confirm before relying on it.
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

//
// Locks/semaphores shared by ProgramProcessorRegister() on all CPUs.
//
PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
//
// SMRAM copy of the CPU S3 data saved during the normal boot path.
//
ACPI_CPU_DATA                mAcpiCpuData;
//
// Count of APs that still have to finish the current initialization phase.
//
volatile UINT32              mNumberToFinish;
//
// BSP/AP exchange area, placed right after the copied AP startup code.
//
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

//
// ACPI NVS buffer holding a copy of the hlt-loop template below; APs are
// parked there at the end of InitializeAp(). The code decrements the
// counter whose address is passed on the stack, then halts forever.
//
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};
91
/**
  Sync up the MTRR values for all processors.

  Loads the fixed and variable MTRR settings saved during the normal boot
  path into the executing processor. Called on both the BSP and each AP.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
120
/**
  Increment semaphore by 1.

  This is the V() operation of the semaphore scheme used by
  ProgramProcessorRegister(); the increment is atomic, so it is MP safe.

  @param Sem  IN: 32-bit unsigned integer
**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}
134
/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  This is the P() operation of the semaphore scheme used by
  ProgramProcessorRegister(): it blocks (spins) while the count is zero.

  @param Sem  IN: 32-bit unsigned integer
**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  //
  // Spin until the semaphore is non-zero AND this processor wins the
  // compare-exchange race to decrement it. The CAS returns the observed
  // value; a result != Value means another processor changed *Sem between
  // the read and the exchange, so the value is re-read and retried.
  //
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}
161
162 /**
163 Read / write CR value.
164
165 @param[in] CrIndex The CR index which need to read/write.
166 @param[in] Read Read or write. TRUE is read.
167 @param[in,out] CrValue CR value.
168
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
170 **/
171 UINTN
172 ReadWriteCr (
173 IN UINT32 CrIndex,
174 IN BOOLEAN Read,
175 IN OUT UINTN *CrValue
176 )
177 {
178 switch (CrIndex) {
179 case 0:
180 if (Read) {
181 *CrValue = AsmReadCr0 ();
182 } else {
183 AsmWriteCr0 (*CrValue);
184 }
185 break;
186 case 2:
187 if (Read) {
188 *CrValue = AsmReadCr2 ();
189 } else {
190 AsmWriteCr2 (*CrValue);
191 }
192 break;
193 case 3:
194 if (Read) {
195 *CrValue = AsmReadCr3 ();
196 } else {
197 AsmWriteCr3 (*CrValue);
198 }
199 break;
200 case 4:
201 if (Read) {
202 *CrValue = AsmReadCr4 ();
203 } else {
204 AsmWriteCr4 (*CrValue);
205 }
206 break;
207 default:
208 return EFI_UNSUPPORTED;;
209 }
210
211 return EFI_SUCCESS;
212 }
213
/**
  Initialize the CPU registers from a register table.

  Walks every entry of the table and applies it according to its type:
  control registers, MSRs, MMIO bit fields, cache enable/disable, and
  Semaphore entries that implement core/package-scope barriers so that
  dependent settings are programmed in lock step across threads.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        //
        // Unsupported CR index: skip this table entry.
        //
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          //
          // Bit field already holds the desired value: avoid the write.
          //
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            //
            // Whole MSR already matches: skip the write.
            //
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            //
            // Bit field already matches: skip the write.
            //
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes from multiple processors are serialized with a spinlock.
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ThreadCountPerPackage != 0) &&
        (CpuStatus->ThreadCountPerCore != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

        CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;

        //
        // Different cores may have different valid threads in them. If driver maintail clearly
        // thread index in different cores, the logic will be much complicated.
        // Here driver just simply records the max thread number in all cores and use it as expect
        // thread number for all cores.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current core. Maybe some threads are not valid in this core, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current core. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADs in current Core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all VALID THREADs (not all threads) in current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

        //
        // Different packages may have different valid threads in them. If driver maintail clearly
        // thread index in different packages, the logic will be much complicated.
        // Here driver just simply records the max thread number in all packages and use it as expect
        // thread number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADS in current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether VALID THREADS (not all threads) in current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
466
467 /**
468
469 Set Processor register for one AP.
470
471 @param PreSmmRegisterTable Use pre Smm register table or register table.
472
473 **/
474 VOID
475 SetRegister (
476 IN BOOLEAN PreSmmRegisterTable
477 )
478 {
479 CPU_FEATURE_INIT_DATA *FeatureInitData;
480 CPU_REGISTER_TABLE *RegisterTable;
481 CPU_REGISTER_TABLE *RegisterTables;
482 UINT32 InitApicId;
483 UINTN ProcIndex;
484 UINTN Index;
485
486 FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;
487
488 if (PreSmmRegisterTable) {
489 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
490 } else {
491 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
492 }
493 if (RegisterTables == NULL) {
494 return;
495 }
496
497 InitApicId = GetInitialApicId ();
498 RegisterTable = NULL;
499 ProcIndex = (UINTN)-1;
500 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
501 if (RegisterTables[Index].InitialApicId == InitApicId) {
502 RegisterTable = &RegisterTables[Index];
503 ProcIndex = Index;
504 break;
505 }
506 }
507 ASSERT (RegisterTable != NULL);
508
509 if (FeatureInitData->ApLocation != 0) {
510 ProgramProcessorRegister (
511 RegisterTable,
512 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
513 &FeatureInitData->CpuStatus,
514 &mCpuFlags
515 );
516 } else {
517 ProgramProcessorRegister (
518 RegisterTable,
519 NULL,
520 &FeatureInitData->CpuStatus,
521 &mCpuFlags
522 );
523 }
524 }
525
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Runs on each AP started by InitializeCpuBeforeRebase(): restores MTRRs and
  pre-SMM registers, signals readiness, waits for the BSP to complete SMBASE
  relocation, restores the remaining registers, and finally parks the AP in
  a hlt loop located in safe (ACPI NVS) memory.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN      TopOfStack;
  UINT8      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  // The hlt-loop code receives &mNumberToFinish on its stack and performs
  // the final "lock dec" itself (see mApHltLoopCodeTemplate), so the BSP
  // only proceeds once the AP is already executing from the safe buffer.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
566
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  Copies the AP rendezvous code into WorkingBuffer, patches its mode-switch
  jumps to point at the relocated entry points, and fills in the
  MP_CPU_EXCHANGE_INFO block placed immediately after the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The +3 / +2 offsets skip the jump opcodes so the 32-bit target operand
  // is overwritten in place; they must match the instruction encodings in
  // the rendezvous assembly (TODO confirm against MpFuncs.nasm).
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
616
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  //
  // All processors except the BSP must check in before we proceed.
  //
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has decremented mNumberToFinish in InitializeAp().
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
659
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // mNumberToFinish is decremented by each AP from the hlt-loop safe code
  // it was transferred to at the end of InitializeAp().
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
699
/**
  Restore SMM Configuration in S3 boot path.

  Runs after SmmRestoreCpu() has set mRestoreSmmConfigurationInS3: refreshes
  the gSmst fields from the saved SMM core entry context, re-applies the SMM
  Code Access Check configuration, and completes SMM ready-to-lock handling.
  Does nothing when ACPI S3 is disabled or the flag is not set.
**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need make sure gSmst is correct because below function may use them.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // One-shot: clear the flag so this restore runs only once per S3 resume.
    //
    mRestoreSmmConfigurationInS3 = FALSE;
  }
}
736
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Entry point reached from the S3 resume state structure. Restores CPU
  configuration before and after SMBASE relocation, then returns control to
  the PEI phase either via SwitchStack() (32-bit SMM) or AsmDisablePaging64()
  (64-bit SMM). This function does not return normally.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    // NOTE(review): only the first 32 gate descriptors are zeroed and the
    // IDT limit covers 32 entries, although IdtEntryTable has
    // EXCEPTION_VECTOR_NUMBER entries — confirm this is intentional.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( DEBUG_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
873
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the ACPI variable HOB, builds the SMM_S3_RESUME_STATE structure in
  SMRAM (entry point, stack, control registers, signature), and allocates an
  ACPI NVS buffer below 4 GB for the AP hlt-loop code used on the S3 path.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume state lives at the start of the SMRAM range described by
    // the HOB, so the PEI-phase S3 resume code can find it.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      //
      // Stack allocation failed; record a zero size so the consumer knows
      // no stack is available.
      //
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Signature records the SMM execution mode (32- vs 64-bit), selected by
    // the native word size of this build.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
955
956 /**
957 Copy register table from non-SMRAM into SMRAM.
958
959 @param[in] DestinationRegisterTableList Points to destination register table.
960 @param[in] SourceRegisterTableList Points to source register table.
961 @param[in] NumberOfCpus Number of CPUs.
962
963 **/
964 VOID
965 CopyRegisterTable (
966 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
967 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
968 IN UINT32 NumberOfCpus
969 )
970 {
971 UINTN Index;
972 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
973
974 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
975 for (Index = 0; Index < NumberOfCpus; Index++) {
976 if (DestinationRegisterTableList[Index].TableLength != 0) {
977 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
978 RegisterTableEntry = AllocateCopyPool (
979 DestinationRegisterTableList[Index].AllocatedSize,
980 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
981 );
982 ASSERT (RegisterTableEntry != NULL);
983 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
984 }
985 }
986 }
987
988 /**
989 Check whether the register table is empty or not.
990
991 @param[in] RegisterTable Point to the register table.
992 @param[in] NumberOfCpus Number of CPUs.
993
994 @retval TRUE The register table is empty.
995 @retval FALSE The register table is not empty.
996 **/
997 BOOLEAN
998 IsRegisterTableEmpty (
999 IN CPU_REGISTER_TABLE *RegisterTable,
1000 IN UINT32 NumberOfCpus
1001 )
1002 {
1003 UINTN Index;
1004
1005 if (RegisterTable != NULL) {
1006 for (Index = 0; Index < NumberOfCpus; Index++) {
1007 if (RegisterTable[Index].TableLength != 0) {
1008 return FALSE;
1009 }
1010 }
1011 }
1012
1013 return TRUE;
1014 }
1015
1016 /**
1017 Copy the data used to initialize processor register into SMRAM.
1018
1019 @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.
1020 @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.
1021
1022 **/
1023 VOID
1024 CopyCpuFeatureInitDatatoSmram (
1025 IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,
1026 IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc
1027 )
1028 {
1029 CPU_STATUS_INFORMATION *CpuStatus;
1030
1031 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
1032 CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1033 ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);
1034
1035 CopyRegisterTable (
1036 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
1037 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
1038 mAcpiCpuData.NumberOfCpus
1039 );
1040 }
1041
1042 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
1043 CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1044 ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);
1045
1046 CopyRegisterTable (
1047 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
1048 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
1049 mAcpiCpuData.NumberOfCpus
1050 );
1051 }
1052
1053 CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
1054 CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1055
1056 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
1057 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1058 sizeof (UINT32) * CpuStatus->PackageCount,
1059 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
1060 );
1061 ASSERT (CpuStatus->ThreadCountPerPackage != 0);
1062 }
1063
1064 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
1065 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1066 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
1067 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
1068 );
1069 ASSERT (CpuStatus->ThreadCountPerCore != 0);
1070 }
1071
1072 if (CpuFeatureInitDataSrc->ApLocation != 0) {
1073 CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1074 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1075 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
1076 );
1077 ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
1078 }
1079 }
1080
1081 /**
1082 Get ACPI CPU data.
1083
1084 **/
1085 VOID
1086 GetAcpiCpuData (
1087 VOID
1088 )
1089 {
1090 ACPI_CPU_DATA *AcpiCpuData;
1091 IA32_DESCRIPTOR *Gdtr;
1092 IA32_DESCRIPTOR *Idtr;
1093 VOID *GdtForAp;
1094 VOID *IdtForAp;
1095 VOID *MachineCheckHandlerForAp;
1096 CPU_STATUS_INFORMATION *CpuStatus;
1097
1098 if (!mAcpiS3Enable) {
1099 return;
1100 }
1101
1102 //
1103 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1104 //
1105 mAcpiCpuData.NumberOfCpus = 0;
1106
1107 //
1108 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1109 //
1110 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
1111 if (AcpiCpuData == 0) {
1112 return;
1113 }
1114
1115 //
1116 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1117 //
1118 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
1119
1120 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
1121 ASSERT (mAcpiCpuData.MtrrTable != 0);
1122
1123 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
1124
1125 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1126 ASSERT (mAcpiCpuData.GdtrProfile != 0);
1127
1128 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
1129
1130 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1131 ASSERT (mAcpiCpuData.IdtrProfile != 0);
1132
1133 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
1134
1135 //
1136 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1137 //
1138 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
1139 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
1140
1141 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
1142 ASSERT (GdtForAp != NULL);
1143 IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
1144 MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));
1145
1146 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
1147 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
1148 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
1149
1150 Gdtr->Base = (UINTN)GdtForAp;
1151 Idtr->Base = (UINTN)IdtForAp;
1152 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
1153
1154 ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));
1155
1156 if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
1157 //
1158 // If the CPU features will not be initialized by CpuFeaturesPei module during
1159 // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
1160 // which will be consumed in SmmRestoreCpu during next S3 resume.
1161 //
1162 CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);
1163
1164 CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;
1165
1166 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
1167 sizeof (UINT32) * CpuStatus->PackageCount *
1168 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1169 );
1170 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
1171
1172 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
1173 sizeof (UINT32) * CpuStatus->PackageCount *
1174 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1175 );
1176 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
1177
1178 InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
1179 }
1180 }
1181
1182 /**
1183 Get ACPI S3 enable flag.
1184
1185 **/
1186 VOID
1187 GetAcpiS3EnableFlag (
1188 VOID
1189 )
1190 {
1191 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
1192 }