/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
  UINTN             InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

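//
// Template for the hlt-loop parking stub each AP executes at the end of the
// S3 path: the AP finds the address of mNumberToFinish at [esp+4], atomically
// decrements it to report completion, then parks in a cli/hlt loop until the
// OS later brings it back online. A copy is placed in ACPI NVS memory
// (mApHltLoopCode) so the APs can run it outside of SMRAM; see
// TransferApToSafeState() and InitSmmS3ResumeState().
//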
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
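    //
    // Note: when the buffer is enlarged below, locks already handed out keep
    // their original semaphore storage; only the new slots are backed by the
    // freshly allocated page.
    //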
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
/*++

Routine Description:

  Sync up the MTRR values for all processors.

Arguments:

Returns:
    None

--*/
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables        Pointer to register table of the running processor.
  @param  RegisterTableCount    Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTables,
  IN UINTN                     RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
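  //
  // Each logical processor locates its own table by matching the initial
  // APIC ID that was recorded in the register table during normal boot.
  //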
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place the AP in the safe code; it counts down mNumberToFinish there with
  // a lock mechanism.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
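  //
  // TransferApToSafeState() does not return: the AP keeps running the hlt
  // loop in mApHltLoopCode until the OS later takes ownership of it.
  //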
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
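  //
  // The +3 and +2 byte offsets below skip the prefix and opcode bytes of the
  // far jump instructions in the startup code, so that their 32-bit
  // destination operands are patched in place (the exact jump encodings are
  // defined in the companion MpFuncs assembly file).
  //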
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Run the phase before SmmBaseReloc first. Note: this flag is maintained
  // across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and the APs should continue
  // initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

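    //
    // sizeof (UINTN) identifies the build type: the signature tells the PEI
    // S3 resume code whether this SMM environment runs in 64-bit or 32-bit
    // mode, so it can pick the matching hand-off path in SmmRestoreCpu().
    //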
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
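  //
  // The CopyMem above duplicates only the per-CPU table headers; the loop
  // below deep-copies each entry array and rebases the RegisterTableEntry
  // pointers to the new SMRAM allocations.
  //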
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
        DestinationRegisterTableList[Index].AllocatedSize,
        (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
        );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for those MSRs that need bit field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

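  //
  // The shallow copy above still holds pointers into ACPI NVS; each
  // allocation below replaces one of those pointers with a private SMRAM
  // copy, so the S3 resume path does not depend on data outside SMRAM that
  // could have been modified after boot.
  //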
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}