]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
79372ce17a14269fa0f0a5dbc78cdc086eed0cc3
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
#pragma pack(1)
//
// Data exchanged between the BSP and the APs through the buffer placed
// immediately after the AP startup code (see PrepareApStartupVector()).
// Packed because the startup assembly addresses these fields at fixed
// byte offsets -- the layout must match the assembly template exactly.
//
typedef struct {
  UINTN             Lock;         // Used by the AP startup code; not referenced directly in this file
  VOID              *StackStart;  // Base of the AP stack area (from mAcpiCpuData.StackAddress)
  UINTN             StackSize;    // Size available for AP stacks (from mAcpiCpuData.StackSize)
  VOID              *ApFunction;  // C function each AP jumps to after mode switch
  IA32_DESCRIPTOR   GdtrProfile;  // GDTR the AP loads during startup
  IA32_DESCRIPTOR   IdtrProfile;  // IDTR the AP loads during startup
  UINT32            BufferStart;  // Physical address of the startup vector buffer
  UINT32            Cr3;          // Page-table base the AP loads (BSP's CR3 at prepare time)
  UINTN             InitializeFloatingPointUnitsAddress;  // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
30
//
// Layout information for the AP rendezvous (startup) code, filled in by
// AsmGetAddressMap().  The offsets are relative to RendezvousFunnelAddress
// and are used by PrepareApStartupVector() to patch the mode-switch jump
// targets after the code is copied to its runtime location.
//
typedef struct {
  UINT8   *RendezvousFunnelAddress;  // Start of the AP startup code template
  UINTN   PModeEntryOffset;          // Offset of the 32-bit protected-mode entry point
  UINTN   FlatJumpOffset;            // Offset of the jump patched to point at PModeEntry
  UINTN   Size;                      // Total size of the startup code, in bytes
  UINTN   LModeEntryOffset;          // Offset of the 64-bit long-mode entry point
  UINTN   LongJumpOffset;            // Offset of the jump patched to LModeEntry; 0 when absent
} MP_ASSEMBLY_ADDRESS_MAP;
39
//
// Flags used when program the register.
// Shared by the BSP and all APs while ProgramProcessorRegister() runs, so
// every member is either a spinlock or a pointer to per-thread semaphores.
//
typedef struct {
  volatile UINTN    ConsoleLogLock;         // Spinlock used to control console.
  volatile UINTN    MemoryMappedLock;       // Spinlock used to program mmio
  volatile UINT32   *CoreSemaphoreCount;    // Semaphore container used to program
                                            // core level semaphore.
  volatile UINT32   *PackageSemaphoreCount; // Semaphore container used to program
                                            // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
51
//
// Signal that SMM BASE relocation is complete.
// Written only by the BSP; polled by APs in InitializeAp().
//
volatile BOOLEAN             mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

//
// Legacy region just below 0xA0000 used for the real-mode AP startup code.
//
#define LEGACY_REGION_SIZE   (2 * 0x1000)
#define LEGACY_REGION_BASE   (0xA0000 - LEGACY_REGION_SIZE)

// Locks and semaphore pointers shared while programming CPU registers.
PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
// SMRAM copy of the CPU S3 data captured during normal boot (GetAcpiCpuData()).
ACPI_CPU_DATA                mAcpiCpuData;
// Number of APs that still must reach a rendezvous point; BSP spins on it.
volatile UINT32              mNumberToFinish;
// BSP/AP exchange area located right after the copied AP startup code.
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
// Set TRUE at the end of SmmRestoreCpu() so a later SMI restores SMM config.
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

// Platform policy flag, refreshed from PcdAcpiS3Enable by GetAcpiS3EnableFlag().
BOOLEAN                      mAcpiS3Enable = TRUE;

// ACPI NVS buffer receiving a copy of mApHltLoopCodeTemplate for APs to run.
UINT8                        *mApHltLoopCode = NULL;
// 32-bit protected-mode stub: atomically decrement the counter whose address
// is at [esp+4], then cli/hlt forever (the jmp re-enters hlt after any wake).
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };

// Register-type names for debug logging, indexed by REGISTER_TYPE; the final
// entry is the clamp target used with MIN(..., InvalidReg).
CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };
100
/**
  Sync up the MTRR values for all processors.

  Applies the fixed and variable MTRR settings saved during normal boot so
  that each processor calling this on the S3 path ends up with the same
  memory-type configuration.

  @param MtrrTable  Physical address of an MTRR_SETTINGS buffer holding the
                    fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
129
/**
  Increment semaphore by 1.

  Atomic release ("V") operation; safe to call from multiple processors
  concurrently.  Paired with S3WaitForSemaphore().

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}
143
144 /**
145 Decrement the semaphore by 1 if it is not zero.
146
147 Performs an atomic decrement operation for semaphore.
148 The compare exchange operation must be performed using
149 MP safe mechanisms.
150
151 @param Sem IN: 32-bit unsigned integer
152
153 **/
154 VOID
155 S3WaitForSemaphore (
156 IN OUT volatile UINT32 *Sem
157 )
158 {
159 UINT32 Value;
160
161 do {
162 Value = *Sem;
163 } while (Value == 0 ||
164 InterlockedCompareExchange32 (
165 Sem,
166 Value,
167 Value - 1
168 ) != Value);
169 }
170
/**
  Initialize the CPU registers from a register table.

  Walks every entry of RegisterTable in order and applies it according to its
  type: control register, MSR, memory-mapped I/O, cache enable/disable, or a
  cross-thread semaphore used to synchronize register programming between
  threads of the same core or package.

  @param[in] RegisterTable  The register table for this AP.
  @param[in] ApLocation     AP location info for this ap.
  @param[in] CpuStatus      CPU status info for this CPU.
  @param[in] CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ThreadIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Debug-only trace of which logical processor applies which table entry.
    // Serialized with ConsoleLogLock so concurrent APs do not interleave output.
    //
    DEBUG_CODE_BEGIN ();
      if (ApLocation != NULL) {
        AcquireSpinLock (&CpuFlags->ConsoleLogLock);
        //
        // Flatten (Package, Core, Thread) into a linear processor index.
        //
        ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
                      ApLocation->Core * CpuStatus->MaxThreadCount +
                      ApLocation->Thread;
        DEBUG ((
          DEBUG_INFO,
          "Processor = %lu, Entry Index %lu, Type = %s!\n",
          (UINT64)ThreadIndex,
          (UINT64)Index,
          mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
          ));
        ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
      }
    DEBUG_CODE_END ();

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        //
        // CR0: read-modify-write only the bit field described by the entry.
        //
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        //
        // Other control-register indexes are silently ignored.
        //
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes are serialized across processors with MemoryMappedLock.
      // The 64-bit target address is split across Index (low) and HighIndex.
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If driver maintail clearly
        // cores number in different packages, the logic will be much complicated.
        // Here driver just simply records the max core number in all packages and use it as expect
        // core number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
432
433 /**
434
435 Set Processor register for one AP.
436
437 @param PreSmmRegisterTable Use pre Smm register table or register table.
438
439 **/
440 VOID
441 SetRegister (
442 IN BOOLEAN PreSmmRegisterTable
443 )
444 {
445 CPU_REGISTER_TABLE *RegisterTable;
446 CPU_REGISTER_TABLE *RegisterTables;
447 UINT32 InitApicId;
448 UINTN ProcIndex;
449 UINTN Index;
450
451 if (PreSmmRegisterTable) {
452 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
453 } else {
454 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
455 }
456
457 InitApicId = GetInitialApicId ();
458 RegisterTable = NULL;
459 ProcIndex = (UINTN)-1;
460 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
461 if (RegisterTables[Index].InitialApicId == InitApicId) {
462 RegisterTable = &RegisterTables[Index];
463 ProcIndex = Index;
464 break;
465 }
466 }
467 ASSERT (RegisterTable != NULL);
468
469 if (mAcpiCpuData.ApLocation != 0) {
470 ProgramProcessorRegister (
471 RegisterTable,
472 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
473 &mAcpiCpuData.CpuStatus,
474 &mCpuFlags
475 );
476 } else {
477 ProgramProcessorRegister (
478 RegisterTable,
479 NULL,
480 &mAcpiCpuData.CpuStatus,
481 &mCpuFlags
482 );
483 }
484 }
485
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Each AP restores MTRRs and pre-SMM registers, signals the BSP, waits for
  the BSP to finish SMM base relocation, applies the remaining register
  table, and finally parks itself in the hlt-loop stub in safe memory.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Apply the pre-SMM-init register table first.
  //
  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  //
  // Apply the normal (post-relocation) register table.
  //
  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  // The temporary stack is this function's local array, aligned down to
  // CPU_STACK_ALIGNMENT; the stub decrements mNumberToFinish then halts.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
526
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: it copies the AP startup
  code template into WorkingBuffer, patches the mode-switch jump targets to
  their runtime addresses, and fills the MP_CPU_EXCHANGE_INFO area that
  follows the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  // NOTE(review): the +3 / +2 byte offsets skip the jump instructions'
  // opcode bytes so the 32-bit operands are overwritten -- these must match
  // the assembly template returned by AsmGetAddressMap().
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // A long-mode entry exists only on X64 builds of the startup code.
    //
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
576
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.  The BSP then wakes all APs with
  INIT-SIPI-SIPI and waits until every AP has checked in.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // All processors except the BSP must report in.
  //
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
614
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  //
  // All processors except the BSP must reach the hlt-loop before we return.
  //
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
649
/**
  Restore SMM Configuration in S3 boot path.

  Runs from SMI context on the first SMI after S3 resume (the flag is set at
  the end of SmmRestoreCpu()).  No-op when ACPI S3 is disabled or the flag
  is clear.
**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need make sure gSmst is correct because below function may use them.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // One-shot: clear the flag so later SMIs skip this path.
    //
    mRestoreSmmConfigurationInS3 = FALSE;
  }
}
686
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Entered from the S3 resume PEIM via SmmS3ResumeEntryPoint.  Restores CPU
  state before and after SMM base relocation, then returns to the PEI phase
  either by SwitchStack() (32-bit SMM) or AsmDisablePaging64() (64-bit SMM).
  Never returns to its caller; dead-loops on failure.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    // NOTE(review): only the first 32 gates (CPU exceptions) are set up here.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
823
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the ACPI variable HOB, builds the SMM_S3_RESUME_STATE structure at
  the start of the SMRAM region it describes, and allocates the ACPI NVS
  buffer used for the AP hlt-loop stub.  Dead-loops when the required HOB is
  missing.  No-op when ACPI S3 is disabled.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB the S3 resume path cannot work; halt here rather
    // than fail silently later.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume-state structure lives at the start of the SMRAM region
    // described by the HOB; record it in the module global as well.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    //
    // A failed stack allocation is recorded as size 0 instead of aborting.
    //
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Record whether this SMM build is 64-bit or 32-bit; SmmRestoreCpu()
    // selects the return-to-PEI mechanism based on this signature.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
905
906 /**
907 Copy register table from ACPI NVS memory into SMRAM.
908
909 @param[in] DestinationRegisterTableList Points to destination register table.
910 @param[in] SourceRegisterTableList Points to source register table.
911 @param[in] NumberOfCpus Number of CPUs.
912
913 **/
914 VOID
915 CopyRegisterTable (
916 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
917 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
918 IN UINT32 NumberOfCpus
919 )
920 {
921 UINTN Index;
922 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
923
924 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
925 for (Index = 0; Index < NumberOfCpus; Index++) {
926 if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
927 RegisterTableEntry = AllocateCopyPool (
928 DestinationRegisterTableList[Index].AllocatedSize,
929 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
930 );
931 ASSERT (RegisterTableEntry != NULL);
932 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
933 }
934 }
935 }
936
/**
  Get ACPI CPU data.

  Deep-copies the ACPI_CPU_DATA structure published through
  PcdCpuS3DataAddress (ACPI NVS memory) into SMRAM, so the S3 resume path
  inside SMM never dereferences memory outside SMRAM.  Also allocates the
  semaphore containers and initializes the spinlocks used while programming
  registers.  No-op when ACPI S3 is disabled or the PCD was never set.
**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  //
  // Replace each embedded pointer with an SMRAM copy of what it points to.
  //
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  // One pool holds all three, laid out back to back: GDT | IDT | MC handler.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  //
  // Re-point the descriptors at the SMRAM copies.
  //
  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    //
    // One semaphore slot per logical processor (package * core * thread),
    // zero-initialized, for both core- and package-scope synchronization.
    //
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.ConsoleLogLock);
}
1059
1060 /**
1061 Get ACPI S3 enable flag.
1062
1063 **/
1064 VOID
1065 GetAcpiS3EnableFlag (
1066 VOID
1067 )
1068 {
1069 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
1070 }