/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
  UINTN             InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
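
//
// Note: this structure is shared with the AP startup assembly code, so the
// field order and pack(1) layout are assumed to match the corresponding
// structure offsets in the .nasm source; the two must stay in sync.
//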

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;
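
//
// Filled in by AsmGetAddressMap() (declared below); the offsets locate the
// far jump instructions inside the rendezvous funnel code that
// PrepareApStartupVector() patches after copying the code into low memory.
//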

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
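
//
// The 8 KB legacy region just below 0xA0000 is intended for the real-mode
// AP startup (rendezvous) code used on S3 resume; a SIPI vector addresses
// a 4 KB-aligned page below 1 MB, which this base satisfies.
//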

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
  };
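//
// Note: mApHltLoopCodeTemplate is position-independent IA32 machine code.
// TransferApToSafeState() passes the address of mNumberToFinish as the
// first stack argument, so the relocated code decrements the counter once
// and then parks the AP with interrupts masked (the jmp loops back over
// cli/hlt, so a spurious wakeup simply halts again).
//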

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
       (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
                 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                 (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
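
//
// Usage sketch (MSR index is illustrative): a register table entry that
// writes only bits [15:8] of MSR 0x1A0 would have its lock pre-created via
// InitMsrSpinLockByIndex (0x1A0) while the table is copied into SMRAM, and
// SetProcessorRegister() later serializes the read-modify-write through
// GetMsrSpinLockByIndex (0x1A0).
//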

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables        Pointer to register table of the running processor.
  @param  RegisterTableCount    Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE         *RegisterTables,
  IN UINTN                      RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
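
//
// Illustrative entry (values hypothetical): to set CR4.OSFXSR (bit 9) a
// table entry would carry RegisterType = ControlRegister, Index = 4,
// ValidBitStart = 9, ValidBitLength = 1, Value = 1; the ControlRegister
// case above then performs the read-modify-write of that single bit.
//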

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
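  //
  // TransferApToSafeState() is not expected to return: the relocated
  // hlt-loop code decrements mNumberToFinish and parks the AP with
  // interrupts masked.
  //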
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
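  //
  // Note: the fixed +3 and +2 byte offsets skip the prefix/opcode bytes of
  // the far jump instructions in the relocated funnel code so the 32-bit
  // target operands are patched in place; they must stay in sync with the
  // instruction encodings in the assembly source.
  //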

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
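  // The SIPI vector encodes a physical page number below 1 MB, so
  // mAcpiCpuData.StartupVector is assumed to be 4 KB aligned and below
  // 1 MB (see LEGACY_REGION_BASE above).
  //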
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }
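
  //
  // At this point SMM executes in long mode while the 32-bit PEI phase
  // owns S3 Resume, so only a transient x64 IDT covering the 32
  // architectural exception vectors is installed above; the IA32 IDT saved
  // in Ia32Idtr is restored before paging is disabled at the end of this
  // function.
  //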

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS                = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point       = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1          = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2          = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer     = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
        DestinationRegisterTableList[Index].AllocatedSize,
        (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
        );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize the MSR spin lock only for those MSRs that need bit-field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;
  VOID                       *GdtForAp;
  VOID                       *IdtForAp;
  VOID                       *MachineCheckHandlerForAp;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
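  //
  // From here on only the SMRAM copy is consulted on S3 resume; the buffers
  // it points to are duplicated into SMRAM below as well, so SMM never
  // depends on ACPI NVS contents that the OS could have modified.
  //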

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}