/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
#include "PiSmmCpuDxeSmm.h"

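//
// Exchange data between the BSP and the APs during S3 startup.  This layout
// is consumed by the AP startup assembly located through AsmGetAddressMap (),
// so field order and sizes are expected to match that assembly.
//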
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

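//
// 8 KB region immediately below the legacy VGA range that starts at 0xA0000
// (i.e. base address 0x9E000).
//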
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
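//
// Machine code for the AP "safe" parking loop: it loads the address passed
// on the stack (the mNumberToFinish counter), atomically decrements it, and
// then halts with interrupts disabled.
//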
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
};

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
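      // Note: the fresh page provides storage for the additional spin locks;
      // ReallocatePool only moves the lookup array, so SPIN_LOCK pointers
      // already returned by GetMsrSpinLockByIndex () stay valid.
      //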
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
                 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                 (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
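      //
      // Read-modify-write: only the bit field [ValidBitStart,
      // ValidBitStart + ValidBitLength - 1] of the control register is
      // replaced with the value from the table entry.
      //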
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

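  //
  // mApHltLoopCode points into ACPI NVS memory reserved by
  // InitSmmS3ResumeState (), so the APs can remain parked there without the
  // OS reclaiming the code they are executing.
  //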
  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
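  //
  // The +3/+2 byte offsets are assumed to skip the far-jump prefix and
  // opcode bytes in the copied assembly, so that the 32-bit target operand
  // of each jump is what gets patched here.
  //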
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

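  //
  // The BSP does not decrement the counter; only the NumberOfCpus - 1 APs do,
  // each through EarlyMPRendezvousProcedure ().
  //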
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackAddress to the original beginning address.
  //
  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

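    //
    // sizeof (UINTN) identifies the mode this SMM image was built for; the
    // signature tells SmmRestoreCpu () whether a switch back to 32-bit mode
    // is needed before returning to the PEI phase.
    //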
781 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
782 }
783 if (sizeof (UINTN) == sizeof (UINT32)) {
784 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
785 }
786 }
787
788 //
789 // Patch SmmS3ResumeState->SmmS3Cr3
790 //
791 InitSmmS3Cr3 ();
792
793 //
794 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
795 // protected mode on S3 path
796 //
797 Address = BASE_4GB - 1;
798 Status = gBS->AllocatePages (
799 AllocateMaxAddress,
800 EfiACPIMemoryNVS,
801 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
802 &Address
803 );
804 ASSERT_EFI_ERROR (Status);
805 mApHltLoopCode = (UINT8 *) (UINTN) Address;
806 }
807
/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (RegisterTableEntry != NULL);
    CopyMem (RegisterTableEntry, (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
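    //
    // Record the start of the copied entries now; the scan below advances
    // RegisterTableEntry past the end of the table.
    //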
    DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    //
    // Go through all MSRs in register table to initialize MSR spin lock
    //
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize MSR spin lock only for those MSRs that need bit field writing
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

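  //
  // mAcpiCpuData now holds pointers into ACPI NVS; deep-copy each referenced
  // buffer into SMRAM so the data consumed on resume cannot be modified from
  // outside SMM.
  //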
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}