]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2023, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6
7 **/
8
9 #include "PiSmmCpuDxeSmm.h"
10
#pragma pack(1)
//
// Data exchanged between the BSP and the APs during S3 AP startup.
// This structure is placed directly after the relocated AP startup code
// (see PrepareApStartupVector()), so it is packed.
// NOTE(review): the field layout is assumed to be consumed by the AP
// rendezvous assembly code — confirm against the .nasm side before
// changing any field.
//
typedef struct {
  UINTN              Lock;                                  // Serialization field used during AP startup
  VOID               *StackStart;                           // Base of the stack area (from mAcpiCpuData.StackAddress)
  UINTN              StackSize;                             // Per-AP stack size (from mAcpiCpuData.StackSize)
  VOID               *ApFunction;                           // C function each AP runs (set to InitializeAp)
  IA32_DESCRIPTOR    GdtrProfile;                           // GDTR the APs load
  IA32_DESCRIPTOR    IdtrProfile;                           // IDTR the APs load
  UINT32             BufferStart;                           // Physical address of the startup vector buffer
  UINT32             Cr3;                                   // Page-table base loaded into CR3 (BSP's CR3)
  UINTN              InitializeFloatingPointUnitsAddress;   // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
24
//
// Address map returned by AsmGetAddressMap(): location and size of the AP
// rendezvous code blob, plus the offsets of the mode-switch jump
// instructions that PrepareApStartupVector() patches after relocating the
// code to the startup vector.
//
typedef struct {
  UINT8    *RendezvousFunnelAddress;   // Start of the AP startup (rendezvous) code to copy
  UINTN    PModeEntryOffset;           // Offset of the 32-bit protected-mode entry point
  UINTN    FlatJumpOffset;             // Offset of the jump patched with the protected-mode entry
  UINTN    Size;                       // Total size in bytes of the rendezvous code
  UINTN    LModeEntryOffset;           // Offset of the long-mode entry point
  UINTN    LongJumpOffset;             // Offset of the long jump to patch; 0 when not present
} MP_ASSEMBLY_ADDRESS_MAP;
33
//
// Flags used when program the register.
//
typedef struct {
  volatile UINTN     MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32    *CoreSemaphoreCount;     // Semaphore container used to program
                                              // core level semaphore.
  volatile UINT32    *PackageSemaphoreCount;  // Semaphore container used to program
                                              // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
// Cleared by InitializeCpuBeforeRebase(), set by InitializeCpuAfterRebase();
// each AP spins on it in InitializeAp().
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

//
// Two 4 KB pages just below 0xA0000 (the legacy VGA region).
// NOTE(review): not referenced in this chunk; presumably used elsewhere for
// the below-1MB AP startup region — confirm before removing.
//
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;        // Locks/semaphores shared by ProgramProcessorRegister()
ACPI_CPU_DATA               mAcpiCpuData;     // CPU context saved during normal boot for S3 restore
volatile UINT32             mNumberToFinish;  // APs still working; BSP spins until this reaches 0
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;   // BSP<->AP exchange area inside the startup vector
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;

//
// ACPI-NVS buffer (allocated in InitSmmS3ResumeState()) holding a relocated
// copy of mApHltLoopCodeTemplate; APs are parked there by InitializeAp().
//
UINT8  *mApHltLoopCode = NULL;
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,       // lock dec dword ptr [eax]
  0xFA,                   // cli
  0xF4,                   // hlt
  0xEB, 0xFC              // jmp $-2
};
91
92 /**
93 Sync up the MTRR values for all processors.
94
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
96 **/
97 VOID
98 EFIAPI
99 LoadMtrrData (
100 EFI_PHYSICAL_ADDRESS MtrrTable
101 )
102
103 /*++
104
105 Routine Description:
106
107 Sync up the MTRR values for all processors.
108
109 Arguments:
110
111 Returns:
112 None
113
114 --*/
115 {
116 MTRR_SETTINGS *MtrrSettings;
117
118 MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;
119 MtrrSetAllMtrrs (MtrrSettings);
120 }
121
/**
  Increment semaphore by 1.

  MP-safe wrapper around InterlockedIncrement(); used by the Semaphore
  handling in ProgramProcessorRegister() to signal that this thread is
  ready.

  @param Sem IN:  32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}
135
136 /**
137 Decrement the semaphore by 1 if it is not zero.
138
139 Performs an atomic decrement operation for semaphore.
140 The compare exchange operation must be performed using
141 MP safe mechanisms.
142
143 @param Sem IN: 32-bit unsigned integer
144
145 **/
146 VOID
147 S3WaitForSemaphore (
148 IN OUT volatile UINT32 *Sem
149 )
150 {
151 UINT32 Value;
152
153 do {
154 Value = *Sem;
155 } while (Value == 0 ||
156 InterlockedCompareExchange32 (
157 Sem,
158 Value,
159 Value - 1
160 ) != Value);
161 }
162
163 /**
164 Read / write CR value.
165
166 @param[in] CrIndex The CR index which need to read/write.
167 @param[in] Read Read or write. TRUE is read.
168 @param[in,out] CrValue CR value.
169
170 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
171 **/
172 UINTN
173 ReadWriteCr (
174 IN UINT32 CrIndex,
175 IN BOOLEAN Read,
176 IN OUT UINTN *CrValue
177 )
178 {
179 switch (CrIndex) {
180 case 0:
181 if (Read) {
182 *CrValue = AsmReadCr0 ();
183 } else {
184 AsmWriteCr0 (*CrValue);
185 }
186
187 break;
188 case 2:
189 if (Read) {
190 *CrValue = AsmReadCr2 ();
191 } else {
192 AsmWriteCr2 (*CrValue);
193 }
194
195 break;
196 case 3:
197 if (Read) {
198 *CrValue = AsmReadCr3 ();
199 } else {
200 AsmWriteCr3 (*CrValue);
201 }
202
203 break;
204 case 4:
205 if (Read) {
206 *CrValue = AsmReadCr4 ();
207 } else {
208 AsmWriteCr4 (*CrValue);
209 }
210
211 break;
212 default:
213 return EFI_UNSUPPORTED;
214 }
215
216 return EFI_SUCCESS;
217 }
218
/**
  Initialize the CPU registers from a register table.

  Walks every entry of RegisterTable and applies it according to its
  RegisterType: control registers (via ReadWriteCr), MSRs, memory-mapped
  registers (serialized through CpuFlags->MemoryMappedLock), cache
  enable/disable, and Semaphore entries that act as core- or package-scope
  synchronization barriers between threads.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {
    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
        if (EFI_ERROR (Status)) {
          //
          // Unsupported CR index: skip this entry.
          //
          break;
        }

        if (RegisterTableEntry->TestThenWrite) {
          //
          // Skip the write when the target bit field already holds the
          // requested value.
          //
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }

        Value = (UINTN)BitFieldWrite64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                         RegisterTableEntry->Value
                         );
        ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->TestThenWrite) {
          //
          // Skip the write when the MSR (or its bit field) already holds
          // the requested value.
          //
          Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
          if (RegisterTableEntry->ValidBitLength >= 64) {
            if (Value == RegisterTableEntry->Value) {
              break;
            }
          } else {
            CurrentValue = BitFieldRead64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                             );
            if (CurrentValue == RegisterTableEntry->Value) {
              break;
            }
          }
        }

        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }

        break;
      //
      // MemoryMapped operations
      //
      case MemoryMapped:
        //
        // MMIO writes from multiple processors are serialized with a spinlock.
        //
        AcquireSpinLock (&CpuFlags->MemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }

        break;

      case Semaphore:
        // Semaphore works logic like below:
        //
        //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
        //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
        //
        //  All threads (T0...Tn) waits in P() line and continues running
        //  together.
        //
        //
        //  T0             T1            ...           Tn
        //
        //  V(0...n)       V(0...n)      ...           V(0...n)
        //  n * P(0)       n * P(1)      ...           n * P(n)
        //
        ASSERT (
          (ApLocation != NULL) &&
          (CpuStatus->ThreadCountPerPackage != 0) &&
          (CpuStatus->ThreadCountPerCore != 0) &&
          (CpuFlags->CoreSemaphoreCount != NULL) &&
          (CpuFlags->PackageSemaphoreCount != NULL)
          );
        switch (RegisterTableEntry->Value) {
          case CoreDepType:
            //
            // Barrier across all threads of the core this thread belongs to.
            //
            SemaphorePtr       = CpuFlags->CoreSemaphoreCount;
            ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

            CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
            //
            // Get Offset info for the first thread in the core which current thread belongs to.
            //
            FirstThread   = CurrentCore * CpuStatus->MaxThreadCount;
            CurrentThread = FirstThread + ApLocation->Thread;

            //
            // Different cores may have different valid threads in them. If driver maintail clearly
            // thread index in different cores, the logic will be much complicated.
            // Here driver just simply records the max thread number in all cores and use it as expect
            // thread number for all cores.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current core. Maybe some threads are not valid in this core, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current core. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADs in current Core that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether all VALID THREADs (not all threads) in current core are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          case PackageDepType:
            //
            // Barrier across all threads of the package this thread belongs to.
            //
            SemaphorePtr          = CpuFlags->PackageSemaphoreCount;
            ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
            //
            // Get Offset info for the first thread in the package which current thread belongs to.
            //
            FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
            //
            // Get the possible threads count for current package.
            //
            CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

            //
            // Different packages may have different valid threads in them. If driver maintail clearly
            // thread index in different packages, the logic will be much complicated.
            // Here driver just simply records the max thread number in all packages and use it as expect
            // thread number for all packages.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current package. Maybe some threads are not valid in this package, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current package. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADS in current package that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether VALID THREADS (not all threads) in current package are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          default:
            break;
        }

        break;

      default:
        break;
    }
  }
}
479
480 /**
481
482 Set Processor register for one AP.
483
484 @param PreSmmRegisterTable Use pre Smm register table or register table.
485
486 **/
487 VOID
488 SetRegister (
489 IN BOOLEAN PreSmmRegisterTable
490 )
491 {
492 CPU_FEATURE_INIT_DATA *FeatureInitData;
493 CPU_REGISTER_TABLE *RegisterTable;
494 CPU_REGISTER_TABLE *RegisterTables;
495 UINT32 InitApicId;
496 UINTN ProcIndex;
497 UINTN Index;
498
499 FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;
500
501 if (PreSmmRegisterTable) {
502 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
503 } else {
504 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
505 }
506
507 if (RegisterTables == NULL) {
508 return;
509 }
510
511 InitApicId = GetInitialApicId ();
512 RegisterTable = NULL;
513 ProcIndex = (UINTN)-1;
514 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
515 if (RegisterTables[Index].InitialApicId == InitApicId) {
516 RegisterTable = &RegisterTables[Index];
517 ProcIndex = Index;
518 break;
519 }
520 }
521
522 ASSERT (RegisterTable != NULL);
523
524 if (FeatureInitData->ApLocation != 0) {
525 ProgramProcessorRegister (
526 RegisterTable,
527 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
528 &FeatureInitData->CpuStatus,
529 &mCpuFlags
530 );
531 } else {
532 ProgramProcessorRegister (
533 RegisterTable,
534 NULL,
535 &FeatureInitData->CpuStatus,
536 &mCpuFlags
537 );
538 }
539 }
540
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Runs on each AP (installed as mExchangeInfo->ApFunction and started via
  INIT-SIPI-SIPI from InitializeCpuBeforeRebase()): restores MTRRs and
  pre-SMM registers, checks in with the BSP, waits for SMBASE relocation
  to complete, restores the remaining registers, and finally parks the AP
  in the relocated hlt-loop code outside the startup vector.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  // Align the top of the local stack down to CPU_STACK_ALIGNMENT before
  // handing it to the safe hlt-loop code.
  //
  TopOfStack  = (UINTN)Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
581
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: copies the AP rendezvous
  code to WorkingBuffer, patches the mode-switch jump targets to point at
  the relocated copy, and fills in the BSP/AP exchange area placed
  immediately after the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // NOTE(review): the "+ 3" and "+ 2" constants are assumed to be the byte
  // offsets of the 32-bit target operands inside the patched jump
  // instructions — confirm against the rendezvous assembly encoding before
  // changing them.
  //
  CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // A zero LongJumpOffset means the rendezvous code has no long-mode
    // jump to patch.
    //
    *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
  ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32)StartupVector;
  mExchangeInfo->Cr3 = (UINT32)(AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
631
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // With hot-plug support the saved CPU count is only an upper bound;
  // otherwise the counts must match exactly.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  //
  // Every processor except the BSP must check in below.
  //
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has run InitializeAp() far enough to decrement
  // the check-in counter.
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
675
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  //
  // Reset the AP check-in counter; each AP decrements it again from the
  // safe hlt-loop code it is transferred to at the end of InitializeAp().
  //
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // Wait for all APs to reach the safe hlt-loop and check in.
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
716
717 /**
718 Restore SMM Configuration in S3 boot path.
719
720 **/
721 VOID
722 RestoreSmmConfigurationInS3 (
723 VOID
724 )
725 {
726 if (!mAcpiS3Enable) {
727 return;
728 }
729
730 //
731 // Restore SMM Configuration in S3 boot path.
732 //
733 if (mRestoreSmmConfigurationInS3) {
734 //
735 // Need make sure gSmst is correct because below function may use them.
736 //
737 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
738 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
739 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
740 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
741 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
742
743 //
744 // Configure SMM Code Access Check feature if available.
745 //
746 ConfigSmmCodeAccessCheck ();
747
748 SmmCpuFeaturesCompleteSmmReadyToLock ();
749
750 mRestoreSmmConfigurationInS3 = FALSE;
751 }
752 }
753
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  //
  // Mark that we are executing the S3 resume flow.
  //
  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  //
  // Setup 64bit IDT in 64bit SMM env when called from 32bit PEI.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) && (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == TRUE)) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    // NOTE(review): only 32 gates are zeroed and covered by the limit,
    // although IdtEntryTable holds EXCEPTION_VECTOR_NUMBER entries —
    // confirm this is intentional before changing it.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN)IdtEntryTable;
    X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Make sure the gSmmBaseHobGuid existence status is the same between normal and S3 boot.
  //
  ASSERT (mSmmRelocated == (BOOLEAN)(GetFirstGuidHob (&gSmmBaseHobGuid) != NULL));
  if (mSmmRelocated != (BOOLEAN)(GetFirstGuidHob (&gSmmBaseHobGuid) != NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "gSmmBaseHobGuid %a produced in normal boot but %a in S3 boot!",
      mSmmRelocated ? "is" : "is not",
      mSmmRelocated ? "is not" : "is"
      ));
    CpuDeadLoop ();
  }

  //
  // Check whether Smm Relocation is done or not.
  // If not, will do the SmmBases Relocation here!!!
  //
  if (!mSmmRelocated) {
    //
    // Restore SMBASE for BSP and all APs
    //
    SmmRelocateBases ();
  } else {
    //
    // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
    //
    ExecuteFirstSmiInit ();
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode or PcdDxeIplSwitchToLongMode is FALSE, then use SwitchStack() to resume PEI Phase.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) || (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == FALSE)) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    //
    // SwitchStack() does not return: control transfers to the PEI entry
    // point saved in SmmS3ResumeState.
    //
    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
920
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the SMRAM descriptor published via the gEfiAcpiVariableGuid HOB,
  builds the SMM_S3_RESUME_STATE at the start of that SMRAM range, and
  allocates the ACPI-NVS buffer used for the AP hlt-loop code.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB, S3 resume cannot work: halt rather than continue
    // with missing context.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      //
      // Stack allocation failed: record a zero-size stack instead of failing here.
      //
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Record whether this SMM driver is 32-bit or 64-bit; SmmRestoreCpu()
    // uses the signature to pick SwitchStack() vs AsmDisablePaging64()
    // when returning to PEI.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }

    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *)(UINTN)Address;
}
1003
1004 /**
1005 Copy register table from non-SMRAM into SMRAM.
1006
1007 @param[in] DestinationRegisterTableList Points to destination register table.
1008 @param[in] SourceRegisterTableList Points to source register table.
1009 @param[in] NumberOfCpus Number of CPUs.
1010
1011 **/
1012 VOID
1013 CopyRegisterTable (
1014 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
1015 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
1016 IN UINT32 NumberOfCpus
1017 )
1018 {
1019 UINTN Index;
1020 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
1021
1022 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1023 for (Index = 0; Index < NumberOfCpus; Index++) {
1024 if (DestinationRegisterTableList[Index].TableLength != 0) {
1025 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
1026 RegisterTableEntry = AllocateCopyPool (
1027 DestinationRegisterTableList[Index].AllocatedSize,
1028 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
1029 );
1030 ASSERT (RegisterTableEntry != NULL);
1031 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
1032 }
1033 }
1034 }
1035
1036 /**
1037 Check whether the register table is empty or not.
1038
1039 @param[in] RegisterTable Point to the register table.
1040 @param[in] NumberOfCpus Number of CPUs.
1041
1042 @retval TRUE The register table is empty.
1043 @retval FALSE The register table is not empty.
1044 **/
1045 BOOLEAN
1046 IsRegisterTableEmpty (
1047 IN CPU_REGISTER_TABLE *RegisterTable,
1048 IN UINT32 NumberOfCpus
1049 )
1050 {
1051 UINTN Index;
1052
1053 if (RegisterTable != NULL) {
1054 for (Index = 0; Index < NumberOfCpus; Index++) {
1055 if (RegisterTable[Index].TableLength != 0) {
1056 return FALSE;
1057 }
1058 }
1059 }
1060
1061 return TRUE;
1062 }
1063
1064 /**
1065 Copy the data used to initialize processor register into SMRAM.
1066
1067 @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.
1068 @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.
1069
1070 **/
1071 VOID
1072 CopyCpuFeatureInitDatatoSmram (
1073 IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,
1074 IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc
1075 )
1076 {
1077 CPU_STATUS_INFORMATION *CpuStatus;
1078
1079 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
1080 CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1081 ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);
1082
1083 CopyRegisterTable (
1084 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
1085 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
1086 mAcpiCpuData.NumberOfCpus
1087 );
1088 }
1089
1090 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
1091 CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1092 ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);
1093
1094 CopyRegisterTable (
1095 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
1096 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
1097 mAcpiCpuData.NumberOfCpus
1098 );
1099 }
1100
1101 CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
1102 CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1103
1104 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
1105 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1106 sizeof (UINT32) * CpuStatus->PackageCount,
1107 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
1108 );
1109 ASSERT (CpuStatus->ThreadCountPerPackage != 0);
1110 }
1111
1112 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
1113 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1114 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
1115 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
1116 );
1117 ASSERT (CpuStatus->ThreadCountPerCore != 0);
1118 }
1119
1120 if (CpuFeatureInitDataSrc->ApLocation != 0) {
1121 CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1122 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1123 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
1124 );
1125 ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
1126 }
1127 }
1128
/**
  Get ACPI CPU data.

  Copies the CPU S3 data published via PcdCpuS3DataAddress into module-owned
  buffers (mAcpiCpuData and pool allocations) so it can be consumed during
  S3 resume. Does nothing when ACPI S3 is disabled or the PCD was never set.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  //
  // Nothing to capture when S3 support is disabled (flag cached from
  // PcdAcpiS3Enable by GetAcpiS3EnableFlag).
  //
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  //
  // The structure copy above duplicated only the pointers; re-point
  // MtrrTable, GdtrProfile and IdtrProfile at private copies so the data
  // survives independently of the original PCD-published buffers.
  //
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  //
  // One pool buffer holds all three regions back to back:
  // [GDT (Limit+1 bytes)][IDT (Limit+1 bytes)][machine check handler].
  //
  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  //
  // Patch the descriptor bases and handler address to reference the copies.
  //
  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));

  if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
    //
    // If the CPU features will not be initialized by CpuFeaturesPei module during
    // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
    // which will be consumed in SmmRestoreCpu during next S3 resume.
    //
    CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);

    CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;

    //
    // Pre-allocate zeroed semaphore arrays (one slot per logical processor)
    // used for core- and package-scoped synchronization while programming
    // registers.
    //
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);

    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);

    InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
  }
}
1229
/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  //
  // Cache PcdAcpiS3Enable once so S3-dependent paths (e.g. GetAcpiCpuData)
  // can test mAcpiS3Enable without re-reading the PCD.
  //
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}