]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg/PiSmmCpuDxeSmm: Move S3 related code to CpuS3.c
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
//
// Data exchanged between the BSP and the APs during AP wakeup in the S3 path.
// The structure is placed in memory immediately after the AP startup code by
// PrepareApStartupVector().
// NOTE(review): field order/size appears to be consumed by the assembly
// rendezvous code returned by AsmGetAddressMap() -- confirm against the
// assembly source before reordering or resizing any field.
//
typedef struct {
  UINTN Lock;                   // Serialization word used by the AP startup code
  VOID *StackStart;             // Start of the stack area handed out to APs (updated by APs; reset in InitializeCpu)
  UINTN StackSize;              // Stack size reserved per AP
  VOID *ApFunction;             // C routine each AP jumps to after low-level init
  IA32_DESCRIPTOR GdtrProfile;  // GDTR value for APs (copied from mAcpiCpuData.GdtrProfile)
  IA32_DESCRIPTOR IdtrProfile;  // IDTR value for APs (copied from mAcpiCpuData.IdtrProfile)
  UINT32 BufferStart;           // Physical address of the AP startup vector buffer
  UINT32 Cr3;                   // Page-table base loaded from the BSP's CR3
} MP_CPU_EXCHANGE_INFO;
27
//
// Address map of the AP rendezvous assembly code, filled in by
// AsmGetAddressMap(). Offsets locate the jump instructions that
// PrepareApStartupVector() patches after relocating the code.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;  // Start of the AP startup (rendezvous) code to copy
  UINTN PModeEntryOffset;          // Offset of the protected-mode entry point
  UINTN FlatJumpOffset;            // Offset of the far jump into flat protected mode
  UINTN Size;                      // Total size of the rendezvous code
  UINTN LModeEntryOffset;          // Offset of the long-mode entry point (X64)
  UINTN LongJumpOffset;            // Offset of the jump into long mode; 0 when not present
} MP_ASSEMBLY_ADDRESS_MAP;
36
//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP *AddressMap
  );

//
// A 2-page region just below 640KB (0xA0000).
// NOTE(review): no use of these macros is visible in this file chunk --
// confirm callers elsewhere before changing.
//
#define LEGACY_REGION_SIZE (2 * 0x1000)
#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)

// SMRAM copy of the platform CPU S3 data; populated by GetAcpiCpuData().
// NumberOfCpus == 0 means the data is not valid and S3 CPU init is skipped.
ACPI_CPU_DATA mAcpiCpuData;
// Count of APs that still have to finish their rendezvous procedure;
// decremented atomically by each AP, polled by the BSP.
UINT32 mNumberToFinish;
// BSP/AP exchange area placed right after the AP startup code;
// set up by PrepareApStartupVector().
MP_CPU_EXCHANGE_INFO *mExchangeInfo;
// Set TRUE at the end of SmmRestoreCpu(); consumed (and cleared) by
// RestoreSmmConfigurationInS3().
BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
// SMRAM copies of the AP GDT, IDT and machine-check handler; filled by
// GetAcpiCpuData() and copied back out by PrepareApStartupVector().
VOID *mGdtForAp = NULL;
VOID *mIdtForAp = NULL;
VOID *mMachineCheckHandlerForAp = NULL;
// Map of MSR index -> spin lock used to serialize bit-field MSR writes.
// mMsrSpinLockCount is the capacity, mMsrCount the number of used slots.
MP_MSR_LOCK *mMsrSpinLocks = NULL;
UINTN mMsrSpinLockCount;
UINTN mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
77
78 /**
79 Get MSR spin lock by MSR index.
80
81 @param MsrIndex MSR index value.
82
83 @return Pointer to MSR spin lock.
84
85 **/
86 SPIN_LOCK *
87 GetMsrSpinLockByIndex (
88 IN UINT32 MsrIndex
89 )
90 {
91 UINTN Index;
92 for (Index = 0; Index < mMsrCount; Index++) {
93 if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
94 return mMsrSpinLocks[Index].SpinLock;
95 }
96 }
97 return NULL;
98 }
99
/**
  Initialize MSR spin lock by MSR index.

  Ensures a spin lock exists for the given MSR index: on the first call the
  lock map is allocated on top of the pre-allocated MSR semaphore block; if
  the MSR is not yet tracked it is registered and its lock initialized; and
  when the map becomes full both the semaphore storage and the map are grown.

  @param MsrIndex MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32 MsrIndex
  )
{
  UINTN MsrSpinLockCount;
  UINTN NewMsrSpinLockCount;
  UINTN Index;
  UINTN AddedSize;

  if (mMsrSpinLocks == NULL) {
    //
    // First call: build the MSR-index -> spin-lock map. Each slot points
    // into the pre-allocated semaphore block, spaced mSemaphoreSize apart.
    //
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      //
      // (UINT32)-1 marks an unused slot.
      //
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    //
    // The semaphore block is now fully owned by the lock map.
    //
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      //
      // Point the new slots at the freshly allocated semaphore page; earlier
      // slots keep their pointers into the previous block, which stays live.
      //
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
160
161 /**
162 Sync up the MTRR values for all processors.
163
164 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
165 **/
166 VOID
167 EFIAPI
168 LoadMtrrData (
169 EFI_PHYSICAL_ADDRESS MtrrTable
170 )
171 /*++
172
173 Routine Description:
174
175 Sync up the MTRR values for all processors.
176
177 Arguments:
178
179 Returns:
180 None
181
182 --*/
183 {
184 MTRR_SETTINGS *MtrrSettings;
185
186 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
187 MtrrSetAllMtrrs (MtrrSettings);
188 }
189
190 /**
191 Programs registers for the calling processor.
192
193 This function programs registers for the calling processor.
194
195 @param RegisterTable Pointer to register table of the running processor.
196
197 **/
198 VOID
199 SetProcessorRegister (
200 IN CPU_REGISTER_TABLE *RegisterTable
201 )
202 {
203 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
204 UINTN Index;
205 UINTN Value;
206 SPIN_LOCK *MsrSpinLock;
207
208 //
209 // Traverse Register Table of this logical processor
210 //
211 RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
212 for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
213 //
214 // Check the type of specified register
215 //
216 switch (RegisterTableEntry->RegisterType) {
217 //
218 // The specified register is Control Register
219 //
220 case ControlRegister:
221 switch (RegisterTableEntry->Index) {
222 case 0:
223 Value = AsmReadCr0 ();
224 Value = (UINTN) BitFieldWrite64 (
225 Value,
226 RegisterTableEntry->ValidBitStart,
227 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
228 (UINTN) RegisterTableEntry->Value
229 );
230 AsmWriteCr0 (Value);
231 break;
232 case 2:
233 Value = AsmReadCr2 ();
234 Value = (UINTN) BitFieldWrite64 (
235 Value,
236 RegisterTableEntry->ValidBitStart,
237 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
238 (UINTN) RegisterTableEntry->Value
239 );
240 AsmWriteCr2 (Value);
241 break;
242 case 3:
243 Value = AsmReadCr3 ();
244 Value = (UINTN) BitFieldWrite64 (
245 Value,
246 RegisterTableEntry->ValidBitStart,
247 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
248 (UINTN) RegisterTableEntry->Value
249 );
250 AsmWriteCr3 (Value);
251 break;
252 case 4:
253 Value = AsmReadCr4 ();
254 Value = (UINTN) BitFieldWrite64 (
255 Value,
256 RegisterTableEntry->ValidBitStart,
257 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
258 (UINTN) RegisterTableEntry->Value
259 );
260 AsmWriteCr4 (Value);
261 break;
262 default:
263 break;
264 }
265 break;
266 //
267 // The specified register is Model Specific Register
268 //
269 case Msr:
270 //
271 // If this function is called to restore register setting after INIT signal,
272 // there is no need to restore MSRs in register table.
273 //
274 if (RegisterTableEntry->ValidBitLength >= 64) {
275 //
276 // If length is not less than 64 bits, then directly write without reading
277 //
278 AsmWriteMsr64 (
279 RegisterTableEntry->Index,
280 RegisterTableEntry->Value
281 );
282 } else {
283 //
284 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
285 // to make sure MSR read/write operation is atomic.
286 //
287 MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
288 AcquireSpinLock (MsrSpinLock);
289 //
290 // Set the bit section according to bit start and length
291 //
292 AsmMsrBitFieldWrite64 (
293 RegisterTableEntry->Index,
294 RegisterTableEntry->ValidBitStart,
295 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
296 RegisterTableEntry->Value
297 );
298 ReleaseSpinLock (MsrSpinLock);
299 }
300 break;
301 //
302 // MemoryMapped operations
303 //
304 case MemoryMapped:
305 AcquireSpinLock (mMemoryMappedLock);
306 MmioBitFieldWrite32 (
307 RegisterTableEntry->Index,
308 RegisterTableEntry->ValidBitStart,
309 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
310 (UINT32)RegisterTableEntry->Value
311 );
312 ReleaseSpinLock (mMemoryMappedLock);
313 break;
314 //
315 // Enable or disable cache
316 //
317 case CacheControl:
318 //
319 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
320 //
321 if (RegisterTableEntry->Value == 0) {
322 AsmDisableCache ();
323 } else {
324 AsmEnableCache ();
325 }
326 break;
327
328 default:
329 break;
330 }
331 }
332 }
333
334 /**
335 AP initialization before SMBASE relocation in the S3 boot path.
336 **/
337 VOID
338 EarlyMPRendezvousProcedure (
339 VOID
340 )
341 {
342 CPU_REGISTER_TABLE *RegisterTableList;
343 UINT32 InitApicId;
344 UINTN Index;
345
346 LoadMtrrData (mAcpiCpuData.MtrrTable);
347
348 //
349 // Find processor number for this CPU.
350 //
351 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
352 InitApicId = GetInitialApicId ();
353 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
354 if (RegisterTableList[Index].InitialApicId == InitApicId) {
355 SetProcessorRegister (&RegisterTableList[Index]);
356 break;
357 }
358 }
359
360 //
361 // Count down the number with lock mechanism.
362 //
363 InterlockedDecrement (&mNumberToFinish);
364 }
365
366 /**
367 AP initialization after SMBASE relocation in the S3 boot path.
368 **/
369 VOID
370 MPRendezvousProcedure (
371 VOID
372 )
373 {
374 CPU_REGISTER_TABLE *RegisterTableList;
375 UINT32 InitApicId;
376 UINTN Index;
377
378 ProgramVirtualWireMode ();
379 DisableLvtInterrupts ();
380
381 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
382 InitApicId = GetInitialApicId ();
383 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
384 if (RegisterTableList[Index].InitialApicId == InitApicId) {
385 SetProcessorRegister (&RegisterTableList[Index]);
386 break;
387 }
388 }
389
390 //
391 // Count down the number with lock mechanism.
392 //
393 InterlockedDecrement (&mNumberToFinish);
394 }
395
/**
  Prepares startup vector for APs.

  Copies the AP rendezvous code to WorkingBuffer, patches its mode-switch
  jump targets for the new location, and fills in the BSP/AP exchange area
  that immediately follows the copied code.

  @param WorkingBuffer The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  //
  // Patch the 32-bit operand of the flat-mode jump. The +3/+2 byte offsets
  // skip the instruction opcode bytes.
  // NOTE(review): these offsets are defined by the assembly side of
  // AsmGetAddressMap -- confirm against the assembly source before changing.
  //
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // LongJumpOffset is 0 when the rendezvous code has no long-mode jump;
    // otherwise patch its target too.
    //
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());
}
451
452 /**
453 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
454
455 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
456 and restores MTRRs for both BSP and APs.
457
458 **/
459 VOID
460 EarlyInitializeCpu (
461 VOID
462 )
463 {
464 CPU_REGISTER_TABLE *RegisterTableList;
465 UINT32 InitApicId;
466 UINTN Index;
467
468 LoadMtrrData (mAcpiCpuData.MtrrTable);
469
470 //
471 // Find processor number for this CPU.
472 //
473 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
474 InitApicId = GetInitialApicId ();
475 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
476 if (RegisterTableList[Index].InitialApicId == InitApicId) {
477 SetProcessorRegister (&RegisterTableList[Index]);
478 break;
479 }
480 }
481
482 ProgramVirtualWireMode ();
483
484 PrepareApStartupVector (mAcpiCpuData.StartupVector);
485
486 mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
487 mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;
488
489 //
490 // Send INIT IPI - SIPI to all APs
491 //
492 SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
493
494 while (mNumberToFinish > 0) {
495 CpuPause ();
496 }
497 }
498
499 /**
500 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
501
502 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
503 data saved by normal boot path for both BSP and APs.
504
505 **/
506 VOID
507 InitializeCpu (
508 VOID
509 )
510 {
511 CPU_REGISTER_TABLE *RegisterTableList;
512 UINT32 InitApicId;
513 UINTN Index;
514
515 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
516 InitApicId = GetInitialApicId ();
517 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
518 if (RegisterTableList[Index].InitialApicId == InitApicId) {
519 SetProcessorRegister (&RegisterTableList[Index]);
520 break;
521 }
522 }
523
524 mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
525 //
526 // StackStart was updated when APs were waken up in EarlyInitializeCpu.
527 // Re-initialize StackAddress to original beginning address.
528 //
529 mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
530 mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;
531
532 //
533 // Send INIT IPI - SIPI to all APs
534 //
535 SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
536
537 while (mNumberToFinish > 0) {
538 CpuPause ();
539 }
540 }
541
542 /**
543 Restore SMM Configuration in S3 boot path.
544
545 **/
546 VOID
547 RestoreSmmConfigurationInS3 (
548 VOID
549 )
550 {
551 //
552 // Restore SMM Configuration in S3 boot path.
553 //
554 if (mRestoreSmmConfigurationInS3) {
555 //
556 // Need make sure gSmst is correct because below function may use them.
557 //
558 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
559 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
560 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
561 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
562 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
563
564 //
565 // Configure SMM Code Access Check feature if available.
566 //
567 ConfigSmmCodeAccessCheck ();
568
569 SmmCpuFeaturesCompleteSmmReadyToLock ();
570
571 mRestoreSmmConfigurationInS3 = FALSE;
572 }
573 }
574
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  This function does not return: it hands control back to the PEI S3 resume
  code via SwitchStack() (32-bit SMM) or AsmDisablePaging64() (64-bit SMM),
  and dead-loops if neither path is available.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE *SmmS3ResumeState;
  IA32_DESCRIPTOR Ia32Idtr;
  IA32_DESCRIPTOR X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  //
  // Mark that we are executing on the S3 boot path.
  //
  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    // NOTE(review): only the first 32 gates are zeroed and covered by the
    // limit although IdtEntryTable has EXCEPTION_VECTOR_NUMBER entries --
    // presumably the 32 architectural exception vectors; confirm.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
713
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the SMRAM descriptor published via the gEfiAcpiVariableGuid HOB,
  places a SMM_S3_RESUME_STATE structure at the start of that SMRAM region,
  and records the entry point (SmmRestoreCpu), stack, and control register
  values needed to re-enter SMM during S3 resume.

  @param[in] Cr3 The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32 Cr3
  )
{
  VOID *GuidHob;
  EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
  SMM_S3_RESUME_STATE *SmmS3ResumeState;

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume state structure lives at the start of the SMRAM region
    // described by the HOB.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      //
      // Allocation failed: record a zero-sized stack instead of failing here.
      //
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    //
    // Tag the structure with the word size of this SMM image so SmmRestoreCpu
    // knows which resume path (SwitchStack vs. AsmDisablePaging64) to take.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();
}
767
768 /**
769 Copy register table from ACPI NVS memory into SMRAM.
770
771 @param[in] DestinationRegisterTableList Points to destination register table.
772 @param[in] SourceRegisterTableList Points to source register table.
773 @param[in] NumberOfCpus Number of CPUs.
774
775 **/
776 VOID
777 CopyRegisterTable (
778 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
779 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
780 IN UINT32 NumberOfCpus
781 )
782 {
783 UINTN Index;
784 UINTN Index1;
785 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
786
787 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
788 for (Index = 0; Index < NumberOfCpus; Index++) {
789 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
790 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
791 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
792 //
793 // Go though all MSRs in register table to initialize MSR spin lock
794 //
795 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
796 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
797 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
798 //
799 // Initialize MSR spin lock only for those MSRs need bit field writing
800 //
801 InitMsrSpinLockByIndex (RegisterTableEntry->Index);
802 }
803 }
804 }
805 }
806
807 /**
808 Get ACPI CPU data.
809
810 **/
811 VOID
812 GetAcpiCpuData (
813 VOID
814 )
815 {
816 ACPI_CPU_DATA *AcpiCpuData;
817 IA32_DESCRIPTOR *Gdtr;
818 IA32_DESCRIPTOR *Idtr;
819
820 //
821 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
822 //
823 mAcpiCpuData.NumberOfCpus = 0;
824
825 //
826 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
827 //
828 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
829 if (AcpiCpuData == 0) {
830 return;
831 }
832
833 //
834 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
835 //
836 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
837
838 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
839 ASSERT (mAcpiCpuData.MtrrTable != 0);
840
841 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
842
843 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
844 ASSERT (mAcpiCpuData.GdtrProfile != 0);
845
846 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
847
848 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
849 ASSERT (mAcpiCpuData.IdtrProfile != 0);
850
851 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
852
853 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
854 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
855
856 CopyRegisterTable (
857 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
858 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
859 mAcpiCpuData.NumberOfCpus
860 );
861
862 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
863 ASSERT (mAcpiCpuData.RegisterTable != 0);
864
865 CopyRegisterTable (
866 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
867 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
868 mAcpiCpuData.NumberOfCpus
869 );
870
871 //
872 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
873 //
874 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
875 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
876
877 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
878 ASSERT (mGdtForAp != NULL);
879 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
880 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));
881
882 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
883 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
884 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
885 }