/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
#pragma pack(1)
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
  UINTN             InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
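
//
// Note (an assumption, not stated in this file): MP_CPU_EXCHANGE_INFO is
// packed and is placed directly after the AP startup code (see
// PrepareApStartupVector() below), so the rendezvous assembly presumably
// locates these fields by fixed byte offsets. Field order and sizes must
// therefore stay in sync with that assembly.
//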

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA            mAcpiCpuData;
volatile UINT32          mNumberToFinish;
MP_CPU_EXCHANGE_INFO     *mExchangeInfo;
BOOLEAN                  mRestoreSmmConfigurationInS3 = FALSE;
MP_MSR_LOCK              *mMsrSpinLocks = NULL;
UINTN                    mMsrSpinLockCount;
UINTN                    mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE      *mSmmS3ResumeState = NULL;

BOOLEAN                  mAcpiS3Enable = TRUE;

UINT8                    *mApHltLoopCode = NULL;
UINT8                    mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};
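
//
// For readability, the template above is roughly equivalent to the following
// C sketch (illustrative only; the real loop is kept as raw, position-
// independent machine code because the AP executes it from a buffer):
//
//   VOID
//   ApHltLoop (
//     IN volatile UINT32  *NumberToFinish   // passed at [esp+4]
//     )
//   {
//     InterlockedDecrement (NumberToFinish);  // "lock dec" counts this AP done
//     for (;;) {
//       DisableInterrupts ();                 // cli
//       CpuSleep ();                          // hlt; "jmp $-2" re-halts on wake
//     }
//   }
//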

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex  MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex  MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
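
//
// Illustrative usage of the two helpers above (a sketch; 0x1A2 is just an
// example MSR index, not one this driver necessarily programs):
//
//   InitMsrSpinLockByIndex (0x1A2);               // BSP, while copying tables
//   ...
//   MsrSpinLock = GetMsrSpinLockByIndex (0x1A2);  // each CPU, during S3 restore
//   AcquireSpinLock (MsrSpinLock);
//   AsmMsrBitFieldWrite64 (0x1A2, 0, 0, 1);       // serialized bit-field write
//   ReleaseSpinLock (MsrSpinLock);
//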

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  @param  RegisterTables      Pointer to register table of the running processor.
  @param  RegisterTableCount  Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
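
//
// Example (illustrative values only): a table entry with
//   RegisterType = Msr, Index = 0x1B (hypothetical), ValidBitStart = 11,
//   ValidBitLength = 1, Value = 1
// takes the bit-field path above and performs, under the MSR spin lock:
//   AsmMsrBitFieldWrite64 (0x1B, 11, 11, 1);
// i.e. it sets bit 11 (bits ValidBitStart .. ValidBitStart + ValidBitLength - 1)
// while preserving all other bits of the MSR.
//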

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place the AP into the safe hlt-loop code; the count is decremented with a
  // lock mechanism inside that code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
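
//
// Note (a hedged rationale, not stated in the original source): the AP does
// not simply hlt here; it jumps to mApHltLoopCode, which lives in ACPI NVS
// memory below 4GB (see InitSmmS3ResumeState below), presumably so that the
// parked APs execute from memory the OS is required to preserve after S3
// resume.
//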

/**
  Prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
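
//
// Resulting layout of the working buffer (derived from the code above):
//
//   WorkingBuffer (== StartupVector, also the SIPI target)
//   +-------------------------------+
//   | AP rendezvous/startup code    |  AddressMap.Size bytes, with the flat
//   |                               |  and long jump targets patched in place
//   +-------------------------------+
//   | MP_CPU_EXCHANGE_INFO          |  data shared between BSP and APs
//   +-------------------------------+
//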

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  It performs the first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Clear the flag so APs execute only the pre-relocation portion of InitializeAp().
  // Note: this flag is maintained across S3 boots, so it must be reset explicitly.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  It restores configuration according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and APs should continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
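
//
// BSP/AP handshake across the two functions above (derived from the code):
//
//   BSP                                     each AP (InitializeAp)
//   ---                                     ----------------------
//   mNumberToFinish = N-1
//   INIT-SIPI-SIPI  --------------------->  load MTRRs, pre-SMM registers
//   wait mNumberToFinish == 0  <---------   InterlockedDecrement
//   SmmRelocateBases()                      spin on mInitApsAfterSmmBaseReloc
//   mNumberToFinish = N-1
//   mInitApsAfterSmmBaseReloc = TRUE --->   restore register table
//   wait mNumberToFinish == 0  <---------   hlt-loop code decrements the count
//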

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
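
//
// How this structure is consumed (a sketch based on this file alone): the PEI
// S3 resume code locates SMM_S3_RESUME_STATE at the start of the SMRAM region
// described by the gEfiAcpiVariableGuid HOB and, using the saved stack and CR
// values, transfers control to SmmS3ResumeEntryPoint, i.e. SmmRestoreCpu()
// above. SmmRestoreCpu() then returns to PEI through the Return* fields via
// SwitchStack() or AsmDisablePaging64().
//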

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  UINTN                     Index1;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for MSRs that need bit-field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA    *AcpiCpuData;
  IA32_DESCRIPTOR  *Gdtr;
  IA32_DESCRIPTOR  *Idtr;
  VOID             *GdtForAp;
  VOID             *IdtForAp;
  VOID             *MachineCheckHandlerForAp;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM, so the S3 resume
  // path does not depend on copies left in memory outside of SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}