/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8    *RendezvousFunnelAddress;
  UINTN    PModeEntryOffset;
  UINTN    FlatJumpOffset;
  UINTN    Size;
  UINTN    LModeEntryOffset;
  UINTN    LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

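//
// Note: MP_ASSEMBLY_ADDRESS_MAP describes the AP rendezvous stub produced by
// AsmGetAddressMap() below: Size is the stub's total length, and the
// *JumpOffset/*EntryOffset fields locate the far-jump operands that
// PrepareApStartupVector() patches after copying the stub to its runtime
// address, so the mode-switch jumps land on the relocated entry points.
//
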
//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
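//
// The AP startup (SIPI) vector must live in a 4 KB-aligned page below 1 MB,
// because the 8-bit SIPI vector is the real-mode page number at which the
// APs start executing; this legacy region just below 0xA0000 satisfies that.
//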

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0xFA,        // cli
                               0xF4,        // hlt
                               0xEB, 0xFC   // jmp $-2
                               };

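//
// The template above is position-independent machine code: cli masks
// maskable interrupts, hlt parks the AP, and "jmp $-2" (EB FC) branches back
// to the cli/hlt pair so the AP immediately re-halts if an SMI, NMI, or
// other wake event resumes execution past the hlt.
//
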
/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                        (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

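//
// Lock slots are carved out of the pre-allocated SemaphoreMsr block at
// mSemaphoreSize-byte strides, and the pool grows 4 KB at a time; the stride
// presumably keeps each spin lock on its own cache line so that CPUs
// contending on different MSR locks do not false-share.
//
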
/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT signal,
      // there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSR programming issues in parallel execution mode
        // and to make sure the MSR read-modify-write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If the value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

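//
// Illustrative (hypothetical) entry for the ControlRegister path above,
// assuming the field order {RegisterType, Index, ValidBitStart,
// ValidBitLength, Value}:
//
//   { ControlRegister, 4, 9, 1, 1 }   // set only bit 9 of CR4 (CR4.OSFXSR)
//
// would be applied as a read-modify-write of CR4. MSR entries with
// ValidBitLength < 64 take the per-MSR spin lock so concurrent bit-field
// writes to package/core-scope MSRs remain atomic.
//
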
/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number of finished APs with an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;
  UINT32                     TopOfStack;
  UINT8                      Stack[128];

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number of finished APs with an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Place the AP in the safe hlt-loop code
  //
  TopOfStack  = (UINT32) (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINT32) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINT32) (UINTN) mApHltLoopCode, TopOfStack);
}

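//
// After the countdown above, the BSP may resume the OS at any time, so the
// AP cannot keep running from memory the OS will reclaim. It therefore
// copies the cli/hlt/jmp template into the ACPI NVS buffer at mApHltLoopCode
// and jumps there via TransferApToSafeState(), using a small local array
// (aligned down to CPU_STACK_ALIGNMENT) as a throwaway stack for the
// transition.
//
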
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

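//
// Resulting layout at the startup vector: the relocated rendezvous stub
// occupies the first AddressMap.Size bytes, with MP_CPU_EXCHANGE_INFO placed
// immediately after it, so the stub can find the exchange data at a fixed
// offset from its own base. BufferStart and Cr3 are deliberately UINT32:
// the APs begin in real mode below 1 MB and consume these values before or
// while switching into protected/long mode.
//
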
/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It performs the
  first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

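//
// Wakeup handshake used by both initialization passes: the BSP arms
// mNumberToFinish = NumberOfCpus - 1, broadcasts INIT-SIPI-SIPI with the
// low-memory startup vector (whose 4 KB page number becomes the SIPI
// vector), then spins with CpuPause() until every AP has run the rendezvous
// procedure and decremented the counter.
//
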
/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when the APs were woken up in EarlyInitializeCpu;
  // re-initialize it to the original stack base address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct, because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the Debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

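//
// Note on the two exits above: for 32-bit SMM, SwitchStack() transfers
// control directly to the PEI S3 resume entry point. For 64-bit SMM,
// AsmDisablePaging64() is used instead because the S3 resume code in PEI
// runs in 32-bit mode, so the CPU must leave long mode on the way out.
// Neither call returns; reaching the trailing CpuDeadLoop() means no valid
// resume context existed.
//
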
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

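//
// The hlt-loop buffer is requested with AllocateMaxAddress capped at
// BASE_4GB - 1 and typed EfiACPIMemoryNVS: it must be reachable by APs
// running in 32-bit protected mode, and NVS memory is preserved across S3,
// so the OS will not reclaim or overwrite the parked APs' code.
//
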
/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
    CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
    //
    // Go through all MSRs in the register table to initialize the MSR spin locks
    //
    RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize an MSR spin lock only for those MSRs that need bit-field writes
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

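//
// This is a deep copy: the per-CPU table headers and every
// RegisterTableEntry array are duplicated into SMRAM-owned pool memory,
// presumably so the S3 resume flow never dereferences register data left in
// ACPI NVS, which is writable by the OS between suspend and resume.
//
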
/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}