/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };

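//
// Each AP is handed the address of mNumberToFinish as the stack argument when
// TransferApToSafeState() transfers it to this stub (see InitializeAp() below),
// so the AP decrements the counter once and then parks itself in a CLI/HLT loop
// that lives outside of SMRAM.
//
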
/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                        (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTables        Pointer to register table of the running processor.
  @param  RegisterTableCount    Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE         *RegisterTables,
  IN UINTN                      RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

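  //
  // Select the register table that belongs to this logical processor by matching
  // the initial APIC ID recorded for each table on the normal boot path.
  //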
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get the lock to avoid a Package/Core scope MSR programming issue in parallel
        // execution mode and to make sure the MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place the AP into the safe hlt-loop code; the AP counts down the number with
  // a lock mechanism from inside that code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
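
  //
  // The two writes above patch the 32-bit destinations of the mode-switch jumps in
  // the relocated startup code: the flat jump is redirected to PModeEntryOffset and,
  // when LongJumpOffset is non-zero (i.e. the startup code contains a long-mode
  // jump), the long jump is redirected to LModeEntryOffset. The +3/+2 adjustments
  // presumably skip the jump opcode/prefix bytes so that only the operand is
  // overwritten; the exact encoding lives in the corresponding assembly file.
  //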

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute the code that runs before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

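  //
  // Wait until every AP has decremented mNumberToFinish in InitializeAp(), so that
  // all APs are known to have restored their pre-SMM state before the BSP moves on
  // to SMBASE relocation.
  //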
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores configuration
  according to the data saved on the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal the APs that SMM base relocation is complete so they can continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

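  //
  // When SMM runs in 64-bit mode, the IA32 IDT saved above is restored again at the
  // end of this function, right before AsmDisablePaging64() drops back to 32-bit
  // mode to hand control to the PEI S3 resume code.
  //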
  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
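  //
  // Address is preset to just below 4 GB and AllocateMaxAddress is used, so the
  // buffer is guaranteed to be reachable by the 32-bit protected-mode AP code.
  //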
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                     Index;
  UINTN                     Index1;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize an MSR spin lock only for those MSRs that need bit field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

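  //
  // The structure copy above still leaves the embedded pointers (MTRR settings,
  // GDTR/IDTR profiles, register tables, and the AP GDT/IDT/machine check handler)
  // pointing into ACPI NVS, so each of them is deep-copied into SMRAM below.
  //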
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}