/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN              Lock;
  VOID               *StackStart;
  UINTN              StackSize;
  VOID               *ApFunction;
  IA32_DESCRIPTOR    GdtrProfile;
  IA32_DESCRIPTOR    IdtrProfile;
  UINT32             BufferStart;
  UINT32             Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8    *RendezvousFunnelAddress;
  UINTN    PModeEntryOffset;
  UINTN    FlatJumpOffset;
  UINTN    Size;
  UINTN    LModeEntryOffset;
  UINTN    LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;
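
//
// Note: the two structures above are shared with the AP startup assembly
// reached through AsmGetAddressMap () (declared below). Their field order
// and sizes are assumed to mirror the assembly-side layout exactly, so any
// change here must be made in both places or the AP hand-off will break.
//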

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK           *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN    mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP    *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
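
//
// Note: the AP startup (SIPI) vector must sit in a 4 KB-aligned page below
// 1 MB, because APs begin execution in real mode. LEGACY_REGION_BASE
// (0x9E000) places this two-page region directly below the legacy VGA
// range that starts at 0xA0000.
//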

ACPI_CPU_DATA           mAcpiCpuData;
volatile UINT32         mNumberToFinish;
MP_CPU_EXCHANGE_INFO    *mExchangeInfo;
BOOLEAN                 mRestoreSmmConfigurationInS3 = FALSE;
VOID                    *mGdtForAp = NULL;
VOID                    *mIdtForAp = NULL;
VOID                    *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK             *mMsrSpinLocks = NULL;
UINTN                   mMsrSpinLockCount;
UINTN                   mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                 mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE     *mSmmS3ResumeState = NULL;

BOOLEAN                 mAcpiS3Enable = TRUE;

UINT8                   *mApHltLoopCode = NULL;
UINT8                   mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,   // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,         // lock dec  dword ptr [eax]
  0xFA,                     // cli
  0xF4,                     // hlt
  0xEB, 0xFC                // jmp $-2
};
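
//
// How the template above is used (see InitializeAp () and
// TransferApToSafeState () below): each AP is transferred to a copy of this
// code with the address of mNumberToFinish as its sole stack argument. It
// atomically decrements that counter ("lock dec") so the BSP knows the AP
// has reached safe code, then parks in a cli/hlt loop; the trailing jmp
// re-enters the loop should the AP ever wake.
//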

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
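
//
// Illustrative use of the lock (a sketch; this mirrors the Msr case in
// SetProcessorRegister () below). Partial-width MSR writes are serialized
// by holding the per-MSR lock around the read-modify-write:
//
//   SPIN_LOCK  *Lock;
//
//   Lock = GetMsrSpinLockByIndex (MsrIndex);
//   AcquireSpinLock (Lock);
//   AsmMsrBitFieldWrite64 (MsrIndex, BitStart, BitEnd, Value);
//   ReleaseSpinLock (Lock);
//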

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS    *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
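
//
// MtrrSetAllMtrrs () is the MtrrLib service that programs the fixed and
// variable MTRRs from a single MTRR_SETTINGS snapshot. Each processor calls
// LoadMtrrData () for itself (the BSP in InitializeCpuBeforeRebase (), each
// AP in InitializeAp ()), which is how the S3 path keeps MTRRs in sync.
//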

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables        Pointer to register table of the running processor.
  @param  RegisterTableCount    Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTables,
  IN UINTN                     RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY    *RegisterTableEntry;
  UINTN                       Index;
  UINTN                       Value;
  SPIN_LOCK                   *MsrSpinLock;
  UINT32                      InitApicId;
  CPU_REGISTER_TABLE          *RegisterTable;

  //
  // Locate the register table that belongs to this logical processor by
  // matching its initial APIC ID.
  //
  RegisterTable = NULL;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT
      // signal, there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If the length is 64 bits or more, write the MSR directly without
        // reading it first.
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Take the per-MSR lock to avoid package/core-scope MSR programming
        // issues in parallel execution mode and to make sure the MSR
        // read-modify-write is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
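
//
// Illustrative table entry (a sketch, not data from this module): an entry
// that sets bit 9 (OSFXSR) of CR4 would be encoded with the field values
// consumed by the switch above, roughly:
//
//   Entry.RegisterType   = ControlRegister;
//   Entry.Index          = 4;       // CR4
//   Entry.ValidBitStart  = 9;
//   Entry.ValidBitLength = 1;
//   Entry.Value          = 1;
//
// SetProcessorRegister () then performs the AsmReadCr4 () /
// BitFieldWrite64 () / AsmWriteCr4 () read-modify-write shown in the
// ControlRegister case.
//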

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN    TopOfStack;
  UINT8    Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place the AP in the safe code; count down the number with the lock mechanism there.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS       StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP    AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
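
//
// Resulting layout at the startup vector (derived from the code above):
//
//   StartupVector + 0                : AP rendezvous code (AddressMap.Size
//                                      bytes), with its mode-switch jump
//                                      targets patched in place
//   StartupVector + AddressMap.Size  : MP_CPU_EXCHANGE_INFO shared between
//                                      BSP and APs (stack, GDTR/IDTR, CR3, ...)
//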

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It performs the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Keep APs in the pre-SMBASE-relocation code for now. Note: this flag is
  // maintained across S3 boots, so it must be cleared explicitly here.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to the data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and that APs should continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
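
//
// BSP/AP handshake used by the two routines above (all state is in this file):
//
//   1. The BSP clears mInitApsAfterSmmBaseReloc and sends INIT-SIPI-SIPI;
//      each AP runs InitializeAp (), loads its pre-SMM state, decrements
//      mNumberToFinish, and spins on the flag.
//   2. The BSP relocates SMBASE (SmmRelocateBases () in SmmRestoreCpu ()),
//      then sets the flag; each AP restores its post-SMM register table,
//      decrements mNumberToFinish again from the hlt-loop code, and parks.
//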

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE         *SmmS3ResumeState;
  IA32_DESCRIPTOR             Ia32Idtr;
  IA32_DESCRIPTOR             X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR    IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                  Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                    *GuidHob;
  EFI_SMRAM_DESCRIPTOR    *SmramDescriptor;
  SMM_S3_RESUME_STATE     *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS    Address;
  EFI_STATUS              Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
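
//
// Note: the SMM_S3_RESUME_STATE structure initialized above is placed at
// SmramDescriptor->CpuStart so the PEI-phase S3 resume code can locate it;
// its SmmS3ResumeEntryPoint field routes execution back into
// SmmRestoreCpu () with the stack and CR0/CR3/CR4 values recorded here.
//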

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                       Index;
  UINTN                       Index1;
  CPU_REGISTER_TABLE_ENTRY    *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for those MSRs that need bit-field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA      *AcpiCpuData;
  IA32_DESCRIPTOR    *Gdtr;
  IA32_DESCRIPTOR    *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}
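
//
// Layout of the single SMRAM buffer allocated above (derived from the code):
//
//   mGdtForAp                                   : AP GDT copy (Gdtr->Limit + 1 bytes)
//   mIdtForAp = mGdtForAp + GDT size            : AP IDT copy (Idtr->Limit + 1 bytes)
//   mMachineCheckHandlerForAp = mIdtForAp + IDT : Machine Check handler copy
//
// PrepareApStartupVector () copies these back out to ACPI NVS on every S3
// resume, so the SMRAM copies serve as the trusted master copies.
//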

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}