/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;
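//
// Note: declared volatile because each AP busy-polls this flag in
// InitializeAp() while the BSP flips it in InitializeCpuAfterRebase().
//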

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
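//
// Note: this region sits just below the legacy VGA window at 0xA0000.
// Real-mode AP startup code has to execute from below 1 MB, which is
// presumably why a buffer in this legacy range is reserved here.
//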

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
  };
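//
// Each AP executes a copy of the template above with the address of
// mNumberToFinish passed as its stack argument: the AP atomically
// decrements the counter to signal arrival, then parks itself in a
// cli/hlt loop (the trailing jmp re-enters the loop on any spurious wake).
//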

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
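  //
  // Note: the locks above are carved out of the semaphore block at
  // intervals of mSemaphoreSize, which is expected to give each spin lock
  // its own cache line and so avoid false sharing between processors.
  //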
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables        Pointer to register table of the running processor.
  @param  RegisterTableCount    Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTables,
  IN UINTN                     RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
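        // A full 64-bit write (the branch above) replaces the whole MSR
        // without a read-modify-write sequence, so it is issued lock-free.
        //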
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
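  // The hlt-loop buffer was allocated from ACPI NVS memory below 4 GB in
  // InitSmmS3ResumeState(), so the APs end up parked outside SMRAM.
  //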
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
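  // The fixed +3/+2 byte offsets below skip the opcode bytes of the far
  // jump instructions in the startup assembly so that the patched 32-bit
  // targets land on the instructions' immediate operands; these constants
  // must stay in sync with the assembly source.
  //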
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Have the APs execute the pre-SMBASE-relocation code first. Note: this
  // flag is maintained across S3 boots, so it must be explicitly cleared here.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

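  //
  // Wait until every AP has signalled, via InterlockedDecrement() in
  // InitializeAp(), that its pre-SMBASE-relocation initialization is done.
  //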
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores configuration
  according to the data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and the APs may continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is up to date, because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

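  //
  // Neither SwitchStack() nor AsmDisablePaging64() returns, so falling
  // through to this point means the resume context was invalid.
  //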
  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
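  // Presetting Address to (4 GB - 1) with AllocateMaxAddress keeps the
  // buffer below 4 GB, so the 32-bit protected-mode hlt loop remains
  // reachable by the APs.
  //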
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
        DestinationRegisterTableList[Index].AllocatedSize,
        (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
        );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for those MSRs that need bit-field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
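  // Keeping a private copy in SMRAM means the resume path never has to
  // trust data left in unprotected ACPI NVS memory after the OS has run.
  //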
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}