/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

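//
// MP_CPU_EXCHANGE_INFO is the mailbox shared between the BSP and the APs on
// the S3 path. It is placed immediately after the AP startup code copied to
// the startup vector, and the rendezvous assembly is expected to read these
// fields at fixed offsets, which is why the structure is byte-packed.
//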
#pragma pack(1)
typedef struct {
  UINTN            Lock;
  VOID             *StackStart;
  UINTN            StackSize;
  VOID             *ApFunction;
  IA32_DESCRIPTOR  GdtrProfile;
  IA32_DESCRIPTOR  IdtrProfile;
  UINT32           BufferStart;
  UINT32           Cr3;
  UINTN            InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the registers.
//
typedef struct {
  volatile UINTN   MemoryMappedLock;        // Spinlock used to program MMIO
  volatile UINT32  *CoreSemaphoreCount;     // Semaphore container used to program
                                            // core level semaphore.
  volatile UINT32  *PackageSemaphoreCount;  // Semaphore container used to program
                                            // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

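//
// The AP startup vector must reside below 1 MB (and be 4 KB aligned) because
// APs begin execution in real mode at the address encoded in the SIPI vector;
// the buffer is therefore carved out of the legacy region just under 0xA0000.
//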
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;
ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;

UINT8  *mApHltLoopCode = NULL;
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};
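
//
// How the template above is used: InitializeAp () copies it to mApHltLoopCode
// (allocated below 4 GB in ACPI NVS), and TransferApToSafeState () transfers
// each AP to it with the address of mNumberToFinish as the stack argument.
// The AP atomically decrements that counter, then parks in a cli/hlt loop;
// the trailing jmp re-executes hlt after any stray wakeup.
//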

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation on the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}
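
//
// Together, these two helpers form a counting-semaphore rendezvous: a thread
// "posts" with S3ReleaseSemaphore () and "takes" with S3WaitForSemaphore ().
// The compare-exchange loop above (rather than a plain atomic decrement)
// keeps the count from dropping below zero when several threads race on the
// same semaphore slot.
//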

/**
  Read / write CR value.

  @param[in]     CrIndex  The CR index to read/write.
  @param[in]     Read     Read or write. TRUE means read.
  @param[in,out] CrValue  CR value.

  @retval EFI_SUCCESS      The CR was read or written successfully.
  @retval EFI_UNSUPPORTED  The CR index is not supported.
**/
EFI_STATUS
ReadWriteCr (
  IN     UINT32   CrIndex,
  IN     BOOLEAN  Read,
  IN OUT UINTN    *CrValue
  )
{
  switch (CrIndex) {
  case 0:
    if (Read) {
      *CrValue = AsmReadCr0 ();
    } else {
      AsmWriteCr0 (*CrValue);
    }
    break;
  case 2:
    if (Read) {
      *CrValue = AsmReadCr2 ();
    } else {
      AsmWriteCr2 (*CrValue);
    }
    break;
  case 3:
    if (Read) {
      *CrValue = AsmReadCr3 ();
    } else {
      AsmWriteCr3 (*CrValue);
    }
    break;
  case 4:
    if (Read) {
      *CrValue = AsmReadCr4 ();
    } else {
      AsmWriteCr4 (*CrValue);
    }
    break;
  default:
    return EFI_UNSUPPORTED;
  }

  return EFI_SUCCESS;
}

/**
  Initialize the CPU registers from a register table.

  @param[in] RegisterTable  The register table for this AP.
  @param[in] ApLocation     AP location info for this AP.
  @param[in] CpuStatus      CPU status info for this CPU.
  @param[in] CpuFlags       Flags data structure used when programming the registers.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      //
      // Semaphore works as follows:
      //
      //   V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //   P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      // All threads (T0...Tn) wait at the P() line and then continue running
      // together.
      //
      //   T0         T1        ...    Tn
      //
      //   V(0...n)   V(0...n)  ...    V(0...n)
      //   n * P(0)   n * P(1)  ...    n * P(n)
      //
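      // Worked example (a sketch, assuming MaxThreadCount == 2): thread T0 of
      // a core posts its core's slots 0 and 1, then waits twice on slot 0;
      // T1 posts the same slots but waits twice on slot 1. Each thread's waits
      // are satisfied only after both threads have posted, so no thread moves
      // on to the next register table entry until its whole core has arrived.
      //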
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ThreadCountPerPackage != 0) &&
        (CpuStatus->ThreadCountPerCore != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr       = CpuFlags->CoreSemaphoreCount;
        ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

        CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
        //
        // Get offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread   = CurrentCore * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;

        //
        // Different cores may have different numbers of valid threads. Tracking
        // the exact thread indexes per core would complicate the logic
        // considerably, so the driver simply records the maximum thread count
        // across all cores and uses it as the expected thread count for every
        // core.
        // In the two steps below, the current thread first releases the semaphore
        // for every thread slot in its core; some slots may not correspond to
        // valid threads, which is harmless. It then waits on its own semaphore
        // once per valid thread in the core. Because only valid threads release
        // semaphores for this thread, waiting for the valid thread count is
        // sufficient.
        //

        //
        // First, notify ALL THREADs in current core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all VALID THREADs (not all threads) in current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr          = CpuFlags->PackageSemaphoreCount;
        ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
        //
        // Get offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

        //
        // Different packages may have different numbers of valid threads. Tracking
        // the exact thread indexes per package would complicate the logic
        // considerably, so the driver simply records the maximum thread count
        // across all packages and uses it as the expected thread count for every
        // package.
        // In the two steps below, the current thread first releases the semaphore
        // for every thread slot in its package; some slots may not correspond to
        // valid threads, which is harmless. It then waits on its own semaphore
        // once per valid thread in the package. Because only valid threads release
        // semaphores for this thread, waiting for the valid thread count is
        // sufficient.
        //

        //
        // First, notify ALL THREADS in current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all VALID THREADS (not all threads) in current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}

/**

  Set processor registers for one AP.

  @param PreSmmRegisterTable  Use the pre-SMM-init register table if TRUE,
                              otherwise use the normal register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE  *RegisterTable;
  CPU_REGISTER_TABLE  *RegisterTables;
  UINT32              InitApicId;
  UINTN               ProcIndex;
  UINTN               Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId    = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex     = (UINTN)-1;
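  //
  // Match this processor's register table by initial APIC ID: the BSP and the
  // APs all run this function concurrently with no processor index passed in,
  // so the APIC ID is the only identity available here.
  //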
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex     = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
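  //
  // TransferApToSafeState () is expected not to return: the AP ends up in the
  // relocated hlt loop, so it no longer executes from this function's stack or
  // from the AP startup buffer, both of which may be reused afterwards.
  //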
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
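
  //
  // The +3 / +2 byte offsets above skip the far-jump instruction's prefix and
  // opcode bytes so that the 32-bit target operand is patched in place; the
  // exact offsets are tied to the instruction encoding in the AP startup
  // assembly described by AsmGetAddressMap ().
  //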

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish           = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and that all APs can continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Register programming must not begin until all APs have resumed their
  // initialization. This is a requirement of the semaphore mechanism in the
  // register table: if a semaphore's dependence type is package type, the
  // semaphore waits for all APs in one package to finish their tasks before
  // setting the next register for all APs. If the APs have not started their
  // tasks while the BSP is doing its own, the BSP thread will hang waiting
  // for the other APs in the same package to finish theirs.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
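  //
  // The CopyMem () above copied only the table headers; the loop below
  // re-homes each CPU's entry buffer with AllocateCopyPool () so the SMRAM
  // copy no longer points back into ACPI NVS memory.
  //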
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

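  //
  // Everything mAcpiCpuData points at is deep-copied into SMRAM below, so the
  // S3 resume path never dereferences the original buffers in ACPI NVS, which
  // live outside SMRAM and could be modified before resume.
  //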
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ThreadCountPerPackage != 0) {
    CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                         sizeof (UINT32) * CpuStatus->PackageCount,
                                         (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerPackage
                                         );
    ASSERT (CpuStatus->ThreadCountPerPackage != 0);
  }
  if (AcpiCpuData->CpuStatus.ThreadCountPerCore != 0) {
    CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                      sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
                                      (UINT8 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerCore
                                      );
    ASSERT (CpuStatus->ThreadCountPerCore != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock ((SPIN_LOCK *) &mCpuFlags.MemoryMappedLock);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}