/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

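//
// Exchange information between the BSP and the APs during AP startup. The
// BSP fills this structure in and the AP rendezvous assembly consumes it,
// so its layout must stay in sync with the offsets that assembly assumes.
//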
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

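//
// Address map of the AP rendezvous code, as reported by AsmGetAddressMap():
// its base address, total size, and the offsets of the mode-switch entry
// points and of the jump instructions that are patched after copying.
//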
typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

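//
// The AP startup (SIPI) vector must reside in memory that a just-reset AP
// can reach in real mode, i.e. below 1 MB; this region sits immediately
// below the legacy VGA range that starts at 0xA0000.
//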
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
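//
// Machine code for the AP parking loop: disable interrupts, halt, and jump
// back to the hlt so that a spurious wakeup (e.g. an SMI or NMI returning)
// simply halts the AP again.
//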
UINT8                        mApHltLoopCodeTemplate[] = {
  0xFA,         // cli
  0xF4,         // hlt
  0xEB, 0xFC    // jmp $-2
  };

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE         *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
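    // (read it, replace the bit field [ValidBitStart, ValidBitStart +
    // ValidBitLength - 1] with RegisterTableEntry->Value, then write it back)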
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number of APs still initializing, using an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;
  UINT32                     TopOfStack;
  UINT8                      Stack[128];

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number of APs still initializing, using an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Place AP into the safe code
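  // by copying the hlt-loop template into the ACPI NVS buffer reserved for
  // it, so the AP keeps executing from memory that the OS will not reclaim
  // after S3 resume; an aligned scratch stack is carved from the local
  // Stack buffer for the transfer.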
  //
  TopOfStack  = (UINT32) (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINT32) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINT32) (UINTN) mApHltLoopCode, TopOfStack);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
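  // The "+ 3" and "+ 2" byte offsets below skip the opcode/prefix bytes of
  // the far jump instructions emitted by the rendezvous assembly, so that
  // only their 32-bit target operands are patched.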
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It
  performs the first-time microcode load and restores MTRRs for both the BSP
  and the APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

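  //
  // Wait for every AP to finish its pre-SMBASE-relocation initialization.
  //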
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It
  restores configuration according to the data saved by the normal boot path
  for both the BSP and the APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when the APs were woken up in EarlyInitializeCpu;
  // re-initialize StackAddress to its original starting address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

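  //
  // Wait for every AP to finish restoring its registers; the APs then park
  // in the safe hlt loop and do not return.
  //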
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct, because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

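    //
    // SwitchStack() does not return; execution continues at
    // ReturnEntryPoint on the stack provided by the PEI phase.
    //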
    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for the APs to execute a hlt loop in
  // protected mode on the S3 path.
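  // The APs run this loop in 32-bit protected mode, so the buffer must be
  // reachable through a 32-bit pointer; hence the allocation below 4 GB.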
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
    CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
    //
    // Go through all MSRs in the register table to initialize the MSR spin locks.
    //
    RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize an MSR spin lock only for those MSRs that need bit-field writes.
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0.
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

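  //
  // A single pool buffer holds the GDT, the IDT, and the machine check
  // handler back to back; mIdtForAp and mMachineCheckHandlerForAp point
  // into that buffer.
  //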
  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}