/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

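//
// Mailbox shared between the BSP and APs during the S3 AP rendezvous. A copy
// is placed immediately after the relocated AP startup code, so the field
// layout is assumed to match what the AP startup assembly expects; do not
// reorder fields.
//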
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

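//
// Offsets into the AP rendezvous code reported by AsmGetAddressMap ();
// PrepareApStartupVector () uses them to relocate the code and patch its
// mode-switch jumps.
//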
typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                   *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN            mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

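//
// An AP startup vector must reside below 1 MB, because a SIPI delivers a
// real-mode entry point; this region sits just under the legacy VGA range
// at 0xA0000.
//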
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA               mAcpiCpuData;
volatile UINT32             mNumberToFinish;
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;
VOID                        *mGdtForAp = NULL;
VOID                        *mIdtForAp = NULL;
VOID                        *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                 *mMsrSpinLocks = NULL;
UINTN                       mMsrSpinLockCount;
UINTN                       mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                     mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE         *mSmmS3ResumeState = NULL;

BOOLEAN                     mAcpiS3Enable = TRUE;

UINT8                       *mApHltLoopCode = NULL;
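//
// Template for the code APs execute while parked: fetch a counter address
// from [esp+4], decrement it with a lock prefix, then halt forever. It is
// copied into mApHltLoopCode in ACPI NVS so APs can wait outside SMRAM.
//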
UINT8                       mApHltLoopCodeTemplate[] = {
                            0x8B, 0x44, 0x24, 0x04,   // mov  eax, dword ptr [esp+4]
                            0xF0, 0xFF, 0x08,         // lock dec  dword ptr [eax]
                            0xFA,                     // cli
                            0xF4,                     // hlt
                            0xEB, 0xFC                // jmp $-2
                            };

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;
  UINTN               TopOfStack;
  UINT8               Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
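  //
  // Patch the 32-bit target immediates of the mode-switch far jumps. The +3
  // and +2 byte offsets are assumed to match the opcode/prefix encodings of
  // the jumps emitted by the MP rendezvous assembly (see AsmGetAddressMap).
  //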
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

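  //
  // Every CPU except the BSP must check in before the BSP proceeds.
  //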
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Run the pre-SmmBaseReloc code first. Note: this flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT-SIPI-SIPI to all APs.
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

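  //
  // Each AP decrements this count after it finishes its post-relocation
  // register restore and parks in the safe hlt loop.
  //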
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and APs should continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

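  //
  // If SMM runs in 64-bit mode while PEI is 32-bit, install a temporary X64
  // IDT now and keep the IA32 IDT descriptor so it can be restored before
  // dropping back to PEI.
  //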
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  UINTN                     Index1;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in register table to initialize MSR spin lock
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize MSR spin lock only for those MSRs that need bit field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA    *AcpiCpuData;
  IA32_DESCRIPTOR  *Gdtr;
  IA32_DESCRIPTOR  *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

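  //
  // The structure copy above still carries pointers into ACPI NVS; duplicate
  // each referenced table into SMRAM so the S3 resume flow does not depend on
  // memory the OS could modify.
  //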
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}
946 }