]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg PiSmmCpuDxeSmm: Reduce SMRAM consumption in CpuS3.c
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6
7 **/
8
9 #include "PiSmmCpuDxeSmm.h"
10
#pragma pack(1)
//
// Data exchanged between the BSP and APs during S3 AP startup.
// NOTE(review): this packed structure is placed directly after the AP
// rendezvous code copied to the startup vector (see PrepareApStartupVector)
// and is presumably consumed by the AP startup assembly — field order and
// packing must not change; confirm against the assembly before editing.
//
typedef struct {
  UINTN             Lock;                 // Spinlock used by APs to serialize access
  VOID              *StackStart;          // Base of the stack region shared out to APs
  UINTN             StackSize;            // Stack size available to each AP
  VOID              *ApFunction;          // C entry point the APs jump to (InitializeAp)
  IA32_DESCRIPTOR   GdtrProfile;          // GDTR image for the APs to load
  IA32_DESCRIPTOR   IdtrProfile;          // IDTR image for the APs to load
  UINT32            BufferStart;          // 32-bit physical address of the startup vector buffer
  UINT32            Cr3;                  // Page-table base used when switching to long mode
  UINTN             InitializeFloatingPointUnitsAddress;  // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
24
//
// Address map of the AP rendezvous assembly stub, as reported by
// AsmGetAddressMap(). Offsets are relative to RendezvousFunnelAddress and
// are used to relocate/patch the code after it is copied below 1 MB.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;   // Start of the AP startup (rendezvous) code
  UINTN PModeEntryOffset;           // Offset of the protected-mode entry point
  UINTN FlatJumpOffset;             // Offset of the far jump into protected mode (patched at +3)
  UINTN Size;                       // Total size of the rendezvous code to copy
  UINTN LModeEntryOffset;           // Offset of the long-mode entry point
  UINTN LongJumpOffset;             // Offset of the far jump into long mode (0 if IA32 build)
} MP_ASSEMBLY_ADDRESS_MAP;
33
//
// Flags used when program the register.
//
typedef struct {
  volatile UINTN  MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32 *CoreSemaphoreCount;     // Semaphore container used to program
                                           // core level semaphore.
  volatile UINT32 *PackageSemaphoreCount;  // Semaphore container used to program
                                           // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
44
//
// Signal that SMM BASE relocation is complete.
// Set FALSE by InitializeCpuBeforeRebase(), TRUE by InitializeCpuAfterRebase();
// APs spin on it in InitializeAp().
//
volatile BOOLEAN mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP *AddressMap
  );

//
// 8 KB region ending at 0xA0000 (below the legacy VGA window).
// NOTE(review): not referenced elsewhere in this chunk — confirm users
// before removing.
//
#define LEGACY_REGION_SIZE (2 * 0x1000)
#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;           // Locks/semaphores for register programming
ACPI_CPU_DATA              mAcpiCpuData;        // SMRAM copy of the CPU S3 data (see GetAcpiCpuData)
volatile UINT32            mNumberToFinish;     // Count of APs still working; BSP spins until 0
MP_CPU_EXCHANGE_INFO       *mExchangeInfo;      // BSP/AP exchange area inside the startup vector
BOOLEAN                    mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;

BOOLEAN mAcpiS3Enable = TRUE;

//
// ACPI-NVS copy of the hlt-loop stub APs park in; template is 32-bit code:
// decrements the counter whose address is at [esp+4], then cli/hlt forever.
//
UINT8 *mApHltLoopCode = NULL;
UINT8 mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};
91
92 /**
93 Sync up the MTRR values for all processors.
94
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
96 **/
97 VOID
98 EFIAPI
99 LoadMtrrData (
100 EFI_PHYSICAL_ADDRESS MtrrTable
101 )
102 /*++
103
104 Routine Description:
105
106 Sync up the MTRR values for all processors.
107
108 Arguments:
109
110 Returns:
111 None
112
113 --*/
114 {
115 MTRR_SETTINGS *MtrrSettings;
116
117 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
118 MtrrSetAllMtrrs (MtrrSettings);
119 }
120
/**
  Increment semaphore by 1.

  Thin MP-safe wrapper over InterlockedIncrement; pairs with
  S3WaitForSemaphore().

  @param Sem IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  InterlockedIncrement (Sem);
}
134
135 /**
136 Decrement the semaphore by 1 if it is not zero.
137
138 Performs an atomic decrement operation for semaphore.
139 The compare exchange operation must be performed using
140 MP safe mechanisms.
141
142 @param Sem IN: 32-bit unsigned integer
143
144 **/
145 VOID
146 S3WaitForSemaphore (
147 IN OUT volatile UINT32 *Sem
148 )
149 {
150 UINT32 Value;
151
152 do {
153 Value = *Sem;
154 } while (Value == 0 ||
155 InterlockedCompareExchange32 (
156 Sem,
157 Value,
158 Value - 1
159 ) != Value);
160 }
161
162 /**
163 Read / write CR value.
164
165 @param[in] CrIndex The CR index which need to read/write.
166 @param[in] Read Read or write. TRUE is read.
167 @param[in,out] CrValue CR value.
168
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
170 **/
171 UINTN
172 ReadWriteCr (
173 IN UINT32 CrIndex,
174 IN BOOLEAN Read,
175 IN OUT UINTN *CrValue
176 )
177 {
178 switch (CrIndex) {
179 case 0:
180 if (Read) {
181 *CrValue = AsmReadCr0 ();
182 } else {
183 AsmWriteCr0 (*CrValue);
184 }
185 break;
186 case 2:
187 if (Read) {
188 *CrValue = AsmReadCr2 ();
189 } else {
190 AsmWriteCr2 (*CrValue);
191 }
192 break;
193 case 3:
194 if (Read) {
195 *CrValue = AsmReadCr3 ();
196 } else {
197 AsmWriteCr3 (*CrValue);
198 }
199 break;
200 case 4:
201 if (Read) {
202 *CrValue = AsmReadCr4 ();
203 } else {
204 AsmWriteCr4 (*CrValue);
205 }
206 break;
207 default:
208 return EFI_UNSUPPORTED;;
209 }
210
211 return EFI_SUCCESS;
212 }
213
/**
  Initialize the CPU registers from a register table.

  Walks every entry of the register table and applies it on the executing
  processor. Entry types: control registers, MSRs (optionally
  test-then-write), MMIO bit fields (serialized by a spinlock), cache
  enable/disable, and Semaphore entries that act as core- or package-level
  synchronization barriers between threads.

  @param[in] RegisterTable The register table for this AP.
  @param[in] ApLocation    AP location info for this ap.
  @param[in] CpuStatus     CPU status info for this CPU.
  @param[in] CpuFlags      Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE         *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION  *ApLocation,
  IN CPU_STATUS_INFORMATION     *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        // Unsupported CR index: skip this entry.
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        // Skip the write if the target bit field already holds the value.
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        // Skip the write if the MSR (or its bit field) already holds the value.
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      // Serialize MMIO programming across processors with a shared spinlock.
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ThreadCountPerPackage != 0) &&
        (CpuStatus->ThreadCountPerCore != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

        CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;

        //
        // Different cores may have different valid threads in them. If driver maintail clearly
        // thread index in different cores, the logic will be much complicated.
        // Here driver just simply records the max thread number in all cores and use it as expect
        // thread number for all cores.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current core. Maybe some threads are not valid in this core, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current core. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADs in current Core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all VALID THREADs (not all threads) in current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

        //
        // Different packages may have different valid threads in them. If driver maintail clearly
        // thread index in different packages, the logic will be much complicated.
        // Here driver just simply records the max thread number in all packages and use it as expect
        // thread number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADS in current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether VALID THREADS (not all threads) in current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
466
467 /**
468
469 Set Processor register for one AP.
470
471 @param PreSmmRegisterTable Use pre Smm register table or register table.
472
473 **/
474 VOID
475 SetRegister (
476 IN BOOLEAN PreSmmRegisterTable
477 )
478 {
479 CPU_REGISTER_TABLE *RegisterTable;
480 CPU_REGISTER_TABLE *RegisterTables;
481 UINT32 InitApicId;
482 UINTN ProcIndex;
483 UINTN Index;
484
485 if (PreSmmRegisterTable) {
486 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
487 } else {
488 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
489 }
490 if (RegisterTables == NULL) {
491 return;
492 }
493
494 InitApicId = GetInitialApicId ();
495 RegisterTable = NULL;
496 ProcIndex = (UINTN)-1;
497 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
498 if (RegisterTables[Index].InitialApicId == InitApicId) {
499 RegisterTable = &RegisterTables[Index];
500 ProcIndex = Index;
501 break;
502 }
503 }
504 ASSERT (RegisterTable != NULL);
505
506 if (mAcpiCpuData.ApLocation != 0) {
507 ProgramProcessorRegister (
508 RegisterTable,
509 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
510 &mAcpiCpuData.CpuStatus,
511 &mCpuFlags
512 );
513 } else {
514 ProgramProcessorRegister (
515 RegisterTable,
516 NULL,
517 &mAcpiCpuData.CpuStatus,
518 &mCpuFlags
519 );
520 }
521 }
522
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Runs on each AP: restores MTRRs and pre-SMM registers, reports completion
  to the BSP, waits for the BSP to finish SMBASE relocation, restores the
  remaining registers, then parks itself in the hlt-loop stub in ACPI NVS.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];  // Small local stack for the final safe-code transfer

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
563
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: copies the rendezvous
  assembly stub into the working buffer, patches its mode-switch far jumps
  to point back into the relocated copy, and fills in the BSP/AP exchange
  structure placed immediately after the code.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The +3 / +2 offsets skip the jump opcodes to reach the 32-bit target
  // operand inside each far-jump instruction.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    // Only present on X64 builds of the stub.
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
613
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs. APs are woken via INIT-SIPI-SIPI
  into the startup vector and the BSP spins until all of them report in.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  // BSP excluded from the countdown.
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
656
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs. Releases the APs
  waiting in InitializeAp(), programs the BSP's own post-relocation
  registers, then waits for every AP to park in the safe hlt loop.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  // BSP excluded from the countdown; APs decrement it from the safe code.
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
696
697 /**
698 Restore SMM Configuration in S3 boot path.
699
700 **/
701 VOID
702 RestoreSmmConfigurationInS3 (
703 VOID
704 )
705 {
706 if (!mAcpiS3Enable) {
707 return;
708 }
709
710 //
711 // Restore SMM Configuration in S3 boot path.
712 //
713 if (mRestoreSmmConfigurationInS3) {
714 //
715 // Need make sure gSmst is correct because below function may use them.
716 //
717 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
718 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
719 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
720 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
721 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
722
723 //
724 // Configure SMM Code Access Check feature if available.
725 //
726 ConfigSmmCodeAccessCheck ();
727
728 SmmCpuFeaturesCompleteSmmReadyToLock ();
729
730 mRestoreSmmConfigurationInS3 = FALSE;
731 }
732 }
733
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Flow: (optionally) switch a 64-bit SMM build to a temporary X64 IDT,
  restore pre-relocation CPU state, relocate SMBASE for all processors,
  restore post-relocation state, then return to the PEI S3 resume entry
  point either via SwitchStack (32-bit) or AsmDisablePaging64 (64-bit).
  This function does not return here on success.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
870
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the SMRAM region reserved via the gEfiAcpiVariableGuid HOB, builds
  the SMM_S3_RESUME_STATE at its base, and allocates the below-4GB ACPI NVS
  stub that APs execute during resume. Dead-loops if the HOB is absent.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    // Without this HOB there is no SMRAM reserved for the resume state;
    // S3 resume cannot work, so halt here rather than fail later.
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      // Allocation failed: record a zero-size stack rather than a bogus base.
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    // Tag the structure with the native word size so SmmRestoreCpu knows
    // which return-to-PEI mechanism to use.
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
952
953 /**
954 Copy register table from non-SMRAM into SMRAM.
955
956 @param[in] DestinationRegisterTableList Points to destination register table.
957 @param[in] SourceRegisterTableList Points to source register table.
958 @param[in] NumberOfCpus Number of CPUs.
959
960 **/
961 VOID
962 CopyRegisterTable (
963 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
964 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
965 IN UINT32 NumberOfCpus
966 )
967 {
968 UINTN Index;
969 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
970
971 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
972 for (Index = 0; Index < NumberOfCpus; Index++) {
973 if (DestinationRegisterTableList[Index].TableLength != 0) {
974 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
975 RegisterTableEntry = AllocateCopyPool (
976 DestinationRegisterTableList[Index].AllocatedSize,
977 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
978 );
979 ASSERT (RegisterTableEntry != NULL);
980 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
981 }
982 }
983 }
984
985 /**
986 Check whether the register table is empty or not.
987
988 @param[in] RegisterTable Point to the register table.
989 @param[in] NumberOfCpus Number of CPUs.
990
991 @retval TRUE The register table is empty.
992 @retval FALSE The register table is not empty.
993 **/
994 BOOLEAN
995 IsRegisterTableEmpty (
996 IN CPU_REGISTER_TABLE *RegisterTable,
997 IN UINT32 NumberOfCpus
998 )
999 {
1000 UINTN Index;
1001
1002 if (RegisterTable != NULL) {
1003 for (Index = 0; Index < NumberOfCpus; Index++) {
1004 if (RegisterTable[Index].TableLength != 0) {
1005 return FALSE;
1006 }
1007 }
1008 }
1009
1010 return TRUE;
1011 }
1012
1013 /**
1014 Get ACPI CPU data.
1015
1016 **/
1017 VOID
1018 GetAcpiCpuData (
1019 VOID
1020 )
1021 {
1022 ACPI_CPU_DATA *AcpiCpuData;
1023 IA32_DESCRIPTOR *Gdtr;
1024 IA32_DESCRIPTOR *Idtr;
1025 VOID *GdtForAp;
1026 VOID *IdtForAp;
1027 VOID *MachineCheckHandlerForAp;
1028 CPU_STATUS_INFORMATION *CpuStatus;
1029
1030 if (!mAcpiS3Enable) {
1031 return;
1032 }
1033
1034 //
1035 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1036 //
1037 mAcpiCpuData.NumberOfCpus = 0;
1038
1039 //
1040 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1041 //
1042 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
1043 if (AcpiCpuData == 0) {
1044 return;
1045 }
1046
1047 //
1048 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1049 //
1050 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
1051
1052 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
1053 ASSERT (mAcpiCpuData.MtrrTable != 0);
1054
1055 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
1056
1057 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1058 ASSERT (mAcpiCpuData.GdtrProfile != 0);
1059
1060 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
1061
1062 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1063 ASSERT (mAcpiCpuData.IdtrProfile != 0);
1064
1065 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
1066
1067 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
1068 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1069 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
1070
1071 CopyRegisterTable (
1072 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
1073 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
1074 mAcpiCpuData.NumberOfCpus
1075 );
1076 } else {
1077 mAcpiCpuData.PreSmmInitRegisterTable = 0;
1078 }
1079
1080 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
1081 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1082 ASSERT (mAcpiCpuData.RegisterTable != 0);
1083
1084 CopyRegisterTable (
1085 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
1086 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
1087 mAcpiCpuData.NumberOfCpus
1088 );
1089 } else {
1090 mAcpiCpuData.RegisterTable = 0;
1091 }
1092
1093 //
1094 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1095 //
1096 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
1097 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
1098
1099 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
1100 ASSERT (GdtForAp != NULL);
1101 IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
1102 MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));
1103
1104 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
1105 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
1106 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
1107
1108 Gdtr->Base = (UINTN)GdtForAp;
1109 Idtr->Base = (UINTN)IdtForAp;
1110 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
1111
1112 CpuStatus = &mAcpiCpuData.CpuStatus;
1113 CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1114 if (AcpiCpuData->CpuStatus.ThreadCountPerPackage != 0) {
1115 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1116 sizeof (UINT32) * CpuStatus->PackageCount,
1117 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerPackage
1118 );
1119 ASSERT (CpuStatus->ThreadCountPerPackage != 0);
1120 }
1121 if (AcpiCpuData->CpuStatus.ThreadCountPerCore != 0) {
1122 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1123 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
1124 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerCore
1125 );
1126 ASSERT (CpuStatus->ThreadCountPerCore != 0);
1127 }
1128 if (AcpiCpuData->ApLocation != 0) {
1129 mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1130 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1131 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
1132 );
1133 ASSERT (mAcpiCpuData.ApLocation != 0);
1134 }
1135 if (CpuStatus->PackageCount != 0) {
1136 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
1137 sizeof (UINT32) * CpuStatus->PackageCount *
1138 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1139 );
1140 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
1141 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
1142 sizeof (UINT32) * CpuStatus->PackageCount *
1143 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1144 );
1145 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
1146 }
1147 InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
1148 }
1149
/**
  Get ACPI S3 enable flag.

  Caches PcdAcpiS3Enable into mAcpiS3Enable, which gates all S3 support in
  this file (GetAcpiCpuData, InitSmmS3ResumeState,
  RestoreSmmConfigurationInS3).

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}