/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN            Lock;
  VOID             *StackStart;
  UINTN            StackSize;
  VOID             *ApFunction;
  IA32_DESCRIPTOR  GdtrProfile;
  IA32_DESCRIPTOR  IdtrProfile;
  UINT32           BufferStart;
  UINT32           Cr3;
  UINTN            InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
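
//
// The packed layout above is shared with the AP startup code copied in
// PrepareApStartupVector(), which places this structure in memory immediately
// after the rendezvous code; byte packing keeps the field offsets fixed
// between the C and assembly views of the data.
//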

typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the register table.
//
typedef struct {
  volatile UINTN   ConsoleLogLock;          // Spinlock used to control console output.
  volatile UINTN   MemoryMappedLock;        // Spinlock used to program MMIO registers.
  volatile UINT32  *CoreSemaphoreCount;     // Semaphore container used to program
                                            // core-level semaphores.
  volatile UINT32  *PackageSemaphoreCount;  // Semaphore container used to program
                                            // package-level semaphores.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
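
//
// LEGACY_REGION_BASE works out to 0x9E000: an 8 KB region immediately below
// 0xA0000, the start of the legacy VGA memory hole, and therefore within the
// address range reachable by APs that start in real mode.
//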

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;
ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;

UINT8  *mApHltLoopCode = NULL;
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};
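
//
// The template above implements the AP "safe state": TransferApToSafeState()
// transfers each AP here with the address of mNumberToFinish passed as a
// stack argument (see InitializeAp() below). The code loads that address,
// atomically decrements the counter so the BSP can observe completion, then
// disables interrupts and halts in a tight hlt/jmp loop for the remainder of
// the boot.
//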

CHAR16  *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  IN: 32-bit unsigned integer to increment.

**/
VOID
S3ReleaseSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN: 32-bit unsigned integer to decrement.

**/
VOID
S3WaitForSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

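  //
  // P() operation: spin until the count is nonzero, then atomically decrement
  // it. The compare-exchange retries whenever another thread changed the count
  // between the read and the exchange.
  //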
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}

/**
  Initialize the CPU registers from a register table.

  @param[in] RegisterTable  The register table for this AP.
  @param[in] ApLocation     AP location info for this AP.
  @param[in] CpuStatus      CPU status info for this CPU.
  @param[in] CpuFlags       Flags data structure used when programming the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ThreadIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    DEBUG_CODE_BEGIN ();
    if (ApLocation != NULL) {
      AcquireSpinLock (&CpuFlags->ConsoleLogLock);
      ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
                    ApLocation->Core * CpuStatus->MaxThreadCount +
                    ApLocation->Thread;
      DEBUG ((
        DEBUG_INFO,
        "Processor = %lu, Entry Index %lu, Type = %s!\n",
        (UINT64)ThreadIndex,
        (UINT64)Index,
        mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
        ));
      ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
    }
    DEBUG_CODE_END ();

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
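      //
      // Each control register follows the same read-modify-write pattern:
      // read the current value, replace the bit field described by
      // ValidBitStart/ValidBitLength with the table value, and write the
      // result back.
      //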
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT
      // signal, there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      //
      // The semaphore logic works as follows:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) wait at the P() line and then continue running
      //  together.
      //
      //
      //  T0           T1          ...          Tn
      //
      //  V(0...n)     V(0...n)    ...          V(0...n)
      //  n * P(0)     n * P(1)    ...          n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get the offset of the first thread in the core to which the current
        // thread belongs.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First, notify all threads in the current core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, wait until all valid threads in the current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get the offset of the first thread in the package to which the
        // current thread belongs.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may contain different numbers of valid cores. If
        // the driver tracked the exact core count of every package, the logic
        // would be much more complicated. Instead, the driver simply records
        // the maximum core count across all packages and uses it as the
        // expected core count for every package.
        // In the two steps below, the current thread first releases the
        // semaphore of every thread slot in the current package; some of those
        // slots may not correspond to valid threads, but the driver does not
        // care. Second, the current thread waits on its own semaphore once per
        // valid thread in the package. Because only valid threads release this
        // thread's semaphore, the driver only needs to wait for the valid
        // thread count.
        //

        //
        // First, notify all threads in the current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, wait until all valid threads in the current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}

/**

  Set the processor registers for one AP.

  @param PreSmmRegisterTable  TRUE to program the pre-SMM-init register table;
                              FALSE to program the normal register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE  *RegisterTable;
  CPU_REGISTER_TABLE  *RegisterTables;
  UINT32              InitApicId;
  UINTN               ProcIndex;
  UINTN               Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex = (UINTN)-1;
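  //
  // Find the register table that belongs to the executing processor by
  // matching its initial APIC ID; the table index doubles as the processor
  // index into the ApLocation array.
  //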
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number of initializing APs with a lock (atomic) mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place the AP into the safe hlt-loop code, which counts down the number
  // with a lock mechanism once more before halting.
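  // A 128-byte on-stack buffer serves as the AP's temporary stack; masking
  // with ~(CPU_STACK_ALIGNMENT - 1) rounds the top of that buffer down to a
  // CPU_STACK_ALIGNMENT boundary (the alignment must be a power of two for
  // this to work).
  //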
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
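
  //
  // The two stores above patch the 32-bit target operands of the mode-switch
  // jumps so they point at the relocated protected-mode and long-mode entry
  // points; the +3 and +2 adjustments step past each jump instruction's
  // leading opcode/prefix bytes to reach that operand.
  //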

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It performs the
  first-time microcode load and restores MTRRs for both the BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute the pre-SMBASE-relocation code first. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

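  //
  // Wait until every AP has finished its pre-relocation initialization; each
  // AP decrements mNumberToFinish in InitializeAp().
  //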
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both the BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and that all APs may continue their initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Register programming must begin only after all APs have continued their
  // initialization. This is a requirement for supporting the semaphore
  // mechanism in the register table: if a semaphore's dependency type is
  // package type, the semaphore waits for all APs in one package to finish
  // their tasks before setting the next register for all APs. If the APs had
  // not begun their tasks while the BSP performs its own, the BSP thread
  // would hang waiting for the other APs in the same package to finish.
  //
  SetRegister (FALSE);

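  //
  // Wait until every AP has reached the hlt loop in the safe code; each AP
  // decrements mNumberToFinish from mApHltLoopCode before halting.
  //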
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct, because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the Debug Timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

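    //
    // Record whether this SMM build runs in 32-bit or 64-bit mode, based on
    // the native word size; SmmRestoreCpu() uses the signature to choose the
    // matching resume path.
    //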
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

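  //
  // Shallow-copy the table headers first, then deep-copy each CPU's entry
  // buffer so the SMRAM copy no longer references entries that live in
  // ACPI NVS memory.
  //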
  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

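  //
  // A single pool allocation is sliced into three consecutive regions: the
  // GDT copy, the IDT copy, and the machine check handler copy. A descriptor
  // limit is one less than the table size, hence the "+ 1" adjustments.
  //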
  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
  InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.ConsoleLogLock);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}