/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;
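
//
// PrepareApStartupVector () places this structure immediately after the
// rendezvous code copied to the startup vector (StartupVector +
// AddressMap.Size), where the AP startup assembly consumes it. The field
// order and sizes are assumed to mirror the assembly-side definition, so
// the two must be kept in sync.
//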

typedef struct {
  UINT8             *RendezvousFunnelAddress;
  UINTN             PModeEntryOffset;
  UINTN             FlatJumpOffset;
  UINTN             Size;
  UINTN             LModeEntryOffset;
  UINTN             LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;
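
//
// The offsets in this map are used by PrepareApStartupVector () to patch the
// operands of the far jumps inside the copied startup code; LongJumpOffset is
// expected to be zero on builds whose rendezvous code has no long-mode entry
// to fix up.
//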

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
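
//
// Growth behavior above: when the lock table fills, a fresh SIZE_4KB block of
// semaphore storage is allocated and only the MP_MSR_LOCK bookkeeping array
// is moved by ReallocatePool (); the SPIN_LOCK storage itself is never moved,
// so locks already returned by GetMsrSpinLockByIndex () stay valid.
//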

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
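
//
// Illustrative (hypothetical) table entry: programming a 2-bit field at bit 0
// of MSR 0x1A0 with value 0x3. Because ValidBitLength < 64, the code above
// takes the per-MSR spin lock and performs a read-modify-write through
// AsmMsrBitFieldWrite64 () instead of a plain AsmWriteMsr64 ().
//
//   Entry.RegisterType   = Msr;
//   Entry.Index          = 0x1A0;   // example MSR index only
//   Entry.ValidBitStart  = 0;
//   Entry.ValidBitLength = 2;
//   Entry.Value          = 0x3;
//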

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
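
//
// Resulting layout at the startup vector (which must live below 1 MB so a
// real-mode SIPI can reach it):
//
//   StartupVector                   : rendezvous code, AddressMap.Size bytes,
//                                     with its far-jump operands patched above
//   StartupVector + AddressMap.Size : MP_CPU_EXCHANGE_INFO that the rendezvous
//                                     code reads to find the stack, GDT/IDT,
//                                     CR3 and the ApFunction to call
//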

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It performs the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
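
//
// BSP/AP handshake used above (and again in InitializeCpu () below): the BSP
// sets mNumberToFinish to the AP count, broadcasts INIT-SIPI-SIPI, and spins
// with CpuPause () while each AP runs its rendezvous procedure and performs
// an InterlockedDecrement () on mNumberToFinish when done.
//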

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackStart to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();
}
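
//
// The structure initialized above lives at SmramDescriptor->CpuStart, where
// the PEI S3 resume code is expected to find it and call
// SmmS3ResumeEntryPoint (SmmRestoreCpu) on the S3 boot path before control is
// handed back via the Return* fields consumed at the end of SmmRestoreCpu ().
//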

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
    CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
    //
    // Go through all MSRs in register table to initialize MSR spin lock
    //
    RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize MSR spin lock only for those MSRs that need bit-field writing
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}
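
//
// Everything consumed on the S3 path is copied into SMRAM above because the
// source buffers live in ACPI NVS memory outside SMRAM, which the OS could
// modify before resume; the copies keep the S3 CPU restore flow from trusting
// data that is writable outside of SMM.
//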

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}