/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN              Lock;
  VOID               *StackStart;
  UINTN              StackSize;
  VOID               *ApFunction;
  IA32_DESCRIPTOR    GdtrProfile;
  IA32_DESCRIPTOR    IdtrProfile;
  UINT32             BufferStart;
  UINT32             Cr3;
  UINTN              InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the register.
//
typedef struct {
  volatile UINTN   ConsoleLogLock;    // Spinlock used to control the console.
  volatile UINTN   MemoryMappedLock;  // Spinlock used to program the MMIO registers.
  volatile UINT32  *SemaphoreCount;   // Semaphore counters used for Semaphore-type entries.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
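//
// Note: with the values above, the legacy region spans 0x9E000..0x9FFFF, the
// top 8 KB of conventional memory just below the VGA hole at 0xA0000.
//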

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };
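//
// Illustrative C rendering of the template above (a sketch for the reader,
// not code compiled into this driver). The assembly is position-independent
// so it can be copied into low memory and executed by an AP:
//
//   VOID HltLoop (IN volatile UINT32 *NumberToFinish) {
//     InterlockedDecrement (NumberToFinish);  // lock dec dword ptr [eax]
//     for (;;) {
//       DisableInterrupts ();                 // cli
//       CpuSleep ();                          // hlt; jmp $-2 loops back to cli
//     }
//   }
//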

CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}
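
//
// Usage sketch (illustrative only, not part of this driver): the two
// primitives above pair up as a counting barrier. Each participant posts one
// unit to every peer, then consumes one unit per peer from its own slot, so
// nobody passes the barrier until every participant has arrived:
//
//   for (Peer = 0; Peer < Count; Peer++) {
//     S3ReleaseSemaphore (&Sem[Peer]);    // V(Peer)
//   }
//   for (Peer = 0; Peer < Count; Peer++) {
//     S3WaitForSemaphore (&Sem[Self]);    // P(Self), Count times
//   }
//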

/**
  Initialize the CPU registers from a register table.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this AP.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when programming the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ThreadIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    DEBUG_CODE_BEGIN ();
      if (ApLocation != NULL) {
        AcquireSpinLock (&CpuFlags->ConsoleLogLock);
        ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
                      ApLocation->Core * CpuStatus->MaxThreadCount +
                      ApLocation->Thread;
        DEBUG ((
          DEBUG_INFO,
          "Processor = %lu, Entry Index %lu, Type = %s!\n",
          (UINT64)ThreadIndex,
          (UINT64)Index,
          mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
          ));
        ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
      }
    DEBUG_CODE_END ();

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      //
      // The semaphore logic works as below:
      //
      // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      // All threads (T0...Tn) wait at the P() line and then continue running
      // together.
      //
      //
      // T0           T1           ...          Tn
      //
      // V(0...n)     V(0...n)     ...          V(0...n)
      // n * P(0)     n * P(1)     ...          n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->SemaphoreCount != NULL)
        );
      SemaphorePtr = CpuFlags->SemaphoreCount;
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        //
        // Get the offset of the first thread in the core to which the current thread belongs.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First, notify all threads in the current core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get the offset of the first thread in the package to which the current thread belongs.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible thread count for the current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for the current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may contain different numbers of valid cores. If the driver
        // tracked the exact core count of every package, the logic would become much more
        // complicated, so it simply records the maximum core count across all packages and
        // uses it as the expected core count for every package.
        // In the two steps below, the current thread first releases a semaphore for each
        // thread in its package. Some of those threads may not be valid in this package,
        // but that does no harm. Second, the current thread waits on its own semaphore once
        // per valid thread in the package. Because only the valid threads release semaphores
        // for this thread, it only needs to wait for the valid thread count.
        //

        //
        // First, notify all threads in the current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
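
//
// Illustrative encoding (a sketch, not an entry this driver itself creates):
// a CPU_REGISTER_TABLE_ENTRY that sets CR4.OSFXSR (bit 9) would look like:
//
//   Entry.RegisterType   = ControlRegister;
//   Entry.Index          = 4;    // CR4
//   Entry.ValidBitStart  = 9;
//   Entry.ValidBitLength = 1;
//   Entry.Value          = 1;
//
// ProgramProcessorRegister () then performs a read-modify-write of only
// bits [9..9] of CR4 via BitFieldWrite64 ().
//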

/**

  Set Processor register for one AP.

  @param PreSmmRegisterTable  TRUE to use the pre-SMM-init register table;
                              FALSE to use the normal register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE  *RegisterTable;
  CPU_REGISTER_TABLE  *RegisterTables;
  UINT32              InitApicId;
  UINTN               ProcIndex;
  UINTN               Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex = (UINTN)-1;
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}
/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);
  //
  // Place the AP into the safe hlt-loop code; the counter is decremented with
  // a lock mechanism inside that code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
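
//
// Resulting layout at the startup vector (illustrative):
//
//   StartupVector + 0 .. Size - 1   AP rendezvous code, copied from the
//                                   template located via AsmGetAddressMap ()
//   StartupVector + Size            MP_CPU_EXCHANGE_INFO, the mailbox the BSP
//                                   and APs use to exchange the stack, GDT,
//                                   IDT, CR3 and the AP function pointer
//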

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It performs the
  first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;
  //
  // Make the APs execute the pre-SMM-base-relocation code first.
  // Note: this flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores the
  configuration saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;
  //
  // Register programming must not begin until all APs have resumed their
  // initialization. This is a requirement of the semaphore mechanism in the
  // register table: when a semaphore's dependence type is package type, the
  // semaphore waits for all APs in one package to finish their tasks before
  // the next register is set for all APs. If the APs have not started their
  // tasks while the BSP performs its own, the BSP thread hangs waiting for
  // the other APs in the same package to finish.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
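
//
// BSP/AP handshake across SMM base relocation (illustrative timeline,
// reconstructed from InitializeAp () and the two functions above):
//
//   BSP: InitializeCpuBeforeRebase ()  sends INIT-SIPI-SIPI and waits for
//                                      mNumberToFinish to reach 0
//   AP : InitializeAp ()               SetRegister (TRUE), decrements
//                                      mNumberToFinish, spins on
//                                      mInitApsAfterSmmBaseReloc
//   BSP: SmmRelocateBases ()           relocates SMBASE for all CPUs
//   BSP: InitializeCpuAfterRebase ()   sets mInitApsAfterSmmBaseReloc, runs
//                                      SetRegister (FALSE) itself, and waits
//                                      for mNumberToFinish again
//   AP : InitializeAp ()               SetRegister (FALSE), then parks in
//                                      mApHltLoopCode via TransferApToSafeState ()
//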

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }
  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.SemaphoreCount = AllocateZeroPool (
                                 sizeof (UINT32) * CpuStatus->PackageCount *
                                 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount);
    ASSERT (mCpuFlags.SemaphoreCount != NULL);
  }
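  //
  // Example sizing (illustrative): with 2 packages, 4 max cores per package
  // and 2 max threads per core, the allocation above reserves
  // 2 * 4 * 2 = 16 UINT32 semaphores, one slot per possible logical processor.
  //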
  InitializeSpinLock ((SPIN_LOCK *) &mCpuFlags.MemoryMappedLock);
  InitializeSpinLock ((SPIN_LOCK *) &mCpuFlags.ConsoleLogLock);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}