/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

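//
// Note: the layout of MP_CPU_EXCHANGE_INFO below is shared with the
// assembly-language AP rendezvous code (the MpFuncs.nasm sources in this
// driver), so its field order and byte packing must stay in sync with it.
//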
#pragma pack(1)
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
  UINTN             InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the registers.
//
typedef struct {
  volatile UINTN    MemoryMappedLock;         // Spinlock used to program MMIO
  volatile UINT32   *CoreSemaphoreCount;      // Semaphore container used to program
                                              // core level semaphore.
  volatile UINT32   *PackageSemaphoreCount;   // Semaphore container used to program
                                              // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;
ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                     mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE         *mSmmS3ResumeState = NULL;

BOOLEAN                     mAcpiS3Enable = TRUE;

UINT8                       *mApHltLoopCode = NULL;
UINT8                       mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,   // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,         // lock dec  dword ptr [eax]
  0xFA,                     // cli
  0xF4,                     // hlt
  0xEB, 0xFC                // jmp $-2
};
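//
// The template above is position-independent 32-bit code: the AP reads the
// address of the count-down variable from its first stack argument at
// [esp+4], atomically decrements it to signal that it has reached the safe
// code, and then parks in a cli/hlt loop. TransferApToSafeState() is expected
// to place &mNumberToFinish at that stack slot when it starts an AP here.
//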

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  IN: Pointer to the 32-bit semaphore to increment.

**/
VOID
S3ReleaseSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN: Pointer to the 32-bit semaphore to decrement.

**/
VOID
S3WaitForSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

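  //
  // Spin until the semaphore is nonzero, then try to decrement it atomically.
  // InterlockedCompareExchange32() returns the value it found at *Sem; the
  // decrement only took effect if that equals the Value just read, so a race
  // with another thread simply causes another pass around the loop.
  //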
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}

/**
  Read / write CR value.

  @param[in]      CrIndex  The CR index to read or write.
  @param[in]      Read     Read or write. TRUE means read.
  @param[in,out]  CrValue  CR value.

  @retval EFI_SUCCESS      The CR was read or written successfully.
  @retval EFI_UNSUPPORTED  The CR index is not supported.
**/
EFI_STATUS
ReadWriteCr (
  IN     UINT32   CrIndex,
  IN     BOOLEAN  Read,
  IN OUT UINTN    *CrValue
  )
{
  switch (CrIndex) {
  case 0:
    if (Read) {
      *CrValue = AsmReadCr0 ();
    } else {
      AsmWriteCr0 (*CrValue);
    }
    break;
  case 2:
    if (Read) {
      *CrValue = AsmReadCr2 ();
    } else {
      AsmWriteCr2 (*CrValue);
    }
    break;
  case 3:
    if (Read) {
      *CrValue = AsmReadCr3 ();
    } else {
      AsmWriteCr3 (*CrValue);
    }
    break;
  case 4:
    if (Read) {
      *CrValue = AsmReadCr4 ();
    } else {
      AsmWriteCr4 (*CrValue);
    }
    break;
  default:
    return EFI_UNSUPPORTED;
  }

  return EFI_SUCCESS;
}

/**
  Initialize the CPU registers from a register table.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this AP.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when programming the registers.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // The semaphore logic works as below:
      //
      // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      // All threads (T0...Tn) wait at the P() line and then continue
      // running together.
      //
      //
      // T0           T1           ...          Tn
      //
      // V(0...n)     V(0...n)     ...          V(0...n)
      // n * P(0)     n * P(1)     ...          n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get the offset of the first thread in the core to which the current thread belongs.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First, notify all threads in the current core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get the offset of the first thread in the package to which the current thread belongs.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible thread count for the current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for the current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may contain different numbers of valid cores. If the driver
        // tracked the exact core count of every package, the logic would be much more
        // complicated, so it simply records the maximum core count across all packages
        // and uses it as the expected core count for every package.
        // In the two steps below, the current thread first releases the semaphore of every
        // thread in the current package; some of those threads may be invalid in this
        // package, but that does no harm. Second, the current thread waits on its own
        // semaphore once per valid thread in the package. Because only valid threads
        // release this thread's semaphore, the driver only needs to wait for the valid
        // thread count.
        //

        //
        // First, notify all threads in the current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}

/**

  Set processor registers for one AP.

  @param PreSmmRegisterTable  TRUE to use the pre-SMM-init register table;
                              FALSE to use the post-SMM-init register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE  *RegisterTable;
  CPU_REGISTER_TABLE  *RegisterTables;
  UINT32              InitApicId;
  UINTN               ProcIndex;
  UINTN               Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex = (UINTN)-1;
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
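  //
  // The mask above rounds TopOfStack down to a CPU_STACK_ALIGNMENT boundary;
  // this assumes CPU_STACK_ALIGNMENT is a power of two.
  //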
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
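  //
  // Each patch below stores a 32-bit jump target inside a far-jump instruction
  // in the copied startup code: the +3 / +2 byte offsets skip the instruction's
  // prefix and opcode bytes to reach its offset operand, and must match the
  // encodings emitted by the assembly rendezvous code.
  //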
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for the phase before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // The BSP must not start setting registers until all APs have continued their
  // initialization. This is a requirement to support the semaphore mechanism in the
  // register table: if a semaphore's dependence type is package type, the semaphore
  // waits for all APs in one package to finish their tasks before the next register
  // is set for all APs. If the APs have not started their tasks while the BSP is
  // doing its own, the BSP thread will hang waiting for the other APs in the same
  // package to finish.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure       = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
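  //
  // Note: the buffers below are re-allocated inside SMRAM and the data copied
  // into them, so the S3 resume path does not consume structures left in
  // unprotected ACPI NVS memory, where they could be tampered with after boot.
  //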

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock ((SPIN_LOCK *) &mCpuFlags.MemoryMappedLock);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}