/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN              Lock;
  VOID               *StackStart;
  UINTN              StackSize;
  VOID               *ApFunction;
  IA32_DESCRIPTOR    GdtrProfile;
  IA32_DESCRIPTOR    IdtrProfile;
  UINT32             BufferStart;
  UINT32             Cr3;
  UINTN              InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8    *RendezvousFunnelAddress;
  UINTN    PModeEntryOffset;
  UINTN    FlatJumpOffset;
  UINTN    Size;
  UINTN    LModeEntryOffset;
  UINTN    LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                   *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN            mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;
VOID                        *mGdtForAp = NULL;
VOID                        *mIdtForAp = NULL;
VOID                        *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                 *mMsrSpinLocks = NULL;
UINTN                       mMsrSpinLockCount;
UINTN                       mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                     mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE         *mSmmS3ResumeState = NULL;

BOOLEAN                     mAcpiS3Enable = TRUE;

UINT8                       *mApHltLoopCode = NULL;
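//
// 32-bit code that each AP executes from safe (ACPI NVS) memory at the end of
// the S3 flow.  The single stack argument is the address of mNumberToFinish
// (see InitializeAp() below): the stub atomically decrements that counter to
// signal the BSP, then parks the AP in a cli/hlt loop.
//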
UINT8                       mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
  };

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks    = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If the MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                        (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
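//
// Illustrative (hypothetical) use: if a register table contains an entry that
// writes only bits 15:0 of MSR 0x1A0 (IA32_MISC_ENABLE), CopyRegisterTable()
// below calls InitMsrSpinLockByIndex (0x1A0) so that SetProcessorRegister()
// can later serialize the read-modify-write via GetMsrSpinLockByIndex().
//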
/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function locates the register table whose InitialApicId matches the
  calling processor's initial APIC ID and applies every entry in that table.

  @param  RegisterTables        Pointer to the register tables of all processors.
  @param  RegisterTableCount    Number of register tables.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId    = GetInitialApicId ();
  RegisterTable = NULL;
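  //
  // Locate the register table that belongs to this processor.  Tables are
  // keyed by initial APIC ID, so the lookup is independent of the order in
  // which processors run this function.
  //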
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Take a spin lock so the read-modify-write of package/core scope
        // MSRs stays atomic when processors program them in parallel.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If the value of the entry is 0, disable the cache; otherwise, enable it.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
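//
// Illustrative (hypothetical) register table entry for SetProcessorRegister():
// an entry with RegisterType = ControlRegister, Index = 4, ValidBitStart = 9,
// ValidBitLength = 1, Value = 1 sets CR4.OSFXSR on the matching processor
// while leaving every other CR4 bit unchanged.
//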

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place the AP in safe code, and count down the number with the lock
  // mechanism from within that code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
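  //
  // The mask above rounds the stack top down to a CPU_STACK_ALIGNMENT
  // boundary; this relies on CPU_STACK_ALIGNMENT being a power of two, which
  // makes (CPU_STACK_ALIGNMENT - 1) a mask of the low-order bits.
  //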
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of the AP startup code, including the code size and
  // the offsets of the long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
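  //
  // The +3 and +2 below skip the prefix/opcode bytes of the copied far-jump
  // instructions so that their 32-bit target operands are patched in place
  // with the relocated mode-switch entry addresses.
  //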
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
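  //
  // The exchange area sits immediately after the copied startup code in the
  // same working buffer, so the rendezvous code can locate it relative to its
  // own entry point.
  //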
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to
  restore CPU status.

  It performs the first-time microcode load and restores MTRRs for both the
  BSP and the APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Have the APs execute the pre-SMBASE-relocation code first.
  // Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
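  //
  // Each AP decrements mNumberToFinish in InitializeAp() once its pre-SMBASE
  // register state is restored; SMBASE relocation must not start until every
  // AP has checked in.
  //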
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to
  restore CPU status.

  It restores the configuration recorded by the normal boot path for both the
  BSP and the APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and that the APs may continue
  // initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;
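  //
  // The APs blocked in InitializeAp() now restore their post-relocation
  // register tables and park in the safe hlt-loop code, which decrements
  // mNumberToFinish a second time.
  //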
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the Debug Timer interrupt, since the new IDT table is for IA32
    // and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
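    //
    // Truncating the return context to UINT32 is safe here: it was produced
    // by 32-bit PEI code, which runs below 4GB.
    //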
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
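  //
  // AllocateMaxAddress treats Address as an upper bound, so seeding it with
  // BASE_4GB - 1 keeps the buffer below 4GB, where APs running in 32-bit
  // protected mode can still execute it.
  //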
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  UINTN                     Index1;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for MSRs that need bit-field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA    *AcpiCpuData;
  IA32_DESCRIPTOR  *Gdtr;
  IA32_DESCRIPTOR  *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
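  //
  // Carve the single allocation into three consecutive regions: the GDT copy,
  // then the IDT copy, then the machine check handler copy.
  //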
  mIdtForAp                 = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}