/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;
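
//
// Note: PrepareApStartupVector() places this structure immediately after the
// relocated AP startup code, where the rendezvous assembly locates it by
// offset. The field order and sizes are therefore assumed to match that
// assembly's layout and should not be changed independently.
//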

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
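
//
// The AP startup vector delivered via SIPI is a real-mode entry point and
// must reside below 1 MB; these two pages are carved out just below the
// legacy VGA region at 0xA0000.
//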

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
  };
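
//
// At the end of MPRendezvousProcedure() each AP copies this template into the
// pre-allocated ACPI NVS buffer (mApHltLoopCode) and jumps to it via
// TransferApToSafeState(), passing the address of mNumberToFinish as the stack
// argument loaded by the first instruction: the AP decrements the counter once
// and then parks in a cli/hlt loop in memory that stays safe across the OS boot.
//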

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}
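
//
// Note: a NULL return means no lock was registered for the MSR.
// CopyRegisterTable() pre-initializes a lock for every MSR that needs a
// bit-field write, so SetProcessorRegister() assumes the lookup always
// succeeds for the tables it processes.
//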

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
       (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
         (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
         (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
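
//
// Growth note: when the preallocated semaphore block is exhausted, another
// 4 KB of lock storage is allocated and the lookup table is enlarged with
// ReallocatePool(). Entries initialized earlier stay valid because the table
// stores pointers into the semaphore pages, not the locks themselves.
//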

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
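
//
// Illustrative example (not taken from any real platform table): an entry
// with RegisterType = ControlRegister, Index = 4, ValidBitStart = 3,
// ValidBitLength = 1, Value = 1 would make the code above perform the
// equivalent of AsmWriteCr4 (AsmReadCr4 () | BIT3) on the calling processor.
//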

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS       StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP    AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
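
  //
  // Note: the "+ 3" and "+ 2" above are the byte offsets of the 32-bit target
  // immediates inside the far-jump encodings emitted by the AP startup
  // assembly; they skip the prefix/opcode bytes so the jump targets can be
  // patched in place. The exact values are tied to that assembly's encoding.
  //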

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It performs the
  first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Make APs execute the pre-SMM-base-relocation code first.
  // Note: this flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
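
//
// EarlyInitializeCpu(), MPRendezvousProcedure() and InitializeCpu() form a
// two-phase rendezvous: the APs restore their pre-SMM state and then spin on
// mInitApsAfterSmmBaseReloc while the BSP relocates SMBASE; once the BSP sets
// the flag in InitializeCpu(), the APs restore their register tables and park
// in the hlt loop.
//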

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and APs should continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
        DestinationRegisterTableList[Index].AllocatedSize,
        (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
        );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in the register table to initialize the MSR spin locks
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize the MSR spin lock only for MSRs that need bit-field writes
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}