/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;
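
//
// Note: the layout of MP_CPU_EXCHANGE_INFO is assumed to match the data block
// that the assembly rendezvous code reads immediately after its own image
// (see PrepareApStartupVector below); reordering fields here would break
// that contract.
//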

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
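
//
// Note: LEGACY_REGION_BASE (0x9E000) sits just below the legacy VGA range at
// 0xA0000. An AP startup vector must live below 1 MB and be 4 KB aligned,
// because a SIPI encodes the entry point as (vector << 12); this region
// satisfies both constraints.
//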

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

/**
  Get MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}
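
//
// Note: the lookup above is a linear scan over mMsrCount entries. That is
// assumed to be acceptable here because the table only holds the distinct
// MSR indexes that appear in the register tables, which is typically small.
//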

/**
  Initialize MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                        (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
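
//
// Note: each spin lock occupies mSemaphoreSize bytes rather than
// sizeof (SPIN_LOCK); mSemaphoreSize is assumed to be cache-line sized, so
// processors spinning on different MSR locks do not contend for the same
// cache line. When the table is enlarged, the new locks are carved out of
// freshly allocated pages while the earlier locks stay in place.
//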

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
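
//
// Note: MtrrSetAllMtrrs programs the fixed-range MTRRs, the variable-range
// MTRRs and MTRR_DEF_TYPE from the saved MTRR_SETTINGS. Every processor
// executes this on resume, so all CPUs end up with identical memory typing.
//
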
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
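
//
// Illustrative example (not part of the saved data): a register table entry
// of { RegisterType = ControlRegister, Index = 4, ValidBitStart = 9,
// ValidBitLength = 1, Value = 1 } would take the ControlRegister/case 4 path
// above, i.e. read CR4, set bit 9 (OSFXSR) via BitFieldWrite64, and write
// CR4 back.
//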

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}
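
//
// Note: every AP runs the procedure above concurrently after receiving the
// INIT-SIPI-SIPI sequence; InterlockedDecrement provides the atomic countdown
// that the BSP polls in EarlyInitializeCpu before it proceeds.
//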

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}
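
//
// Note: ProgramVirtualWireMode and DisableLvtInterrupts reprogram the local
// APIC here because the INIT that restarted the AP is assumed to have left
// the LVT entries in their reset state.
//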

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }
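
  //
  // Note: the "+ 3" and "+ 2" above skip the opcode/prefix bytes of the
  // copied far jump instructions so that their 32-bit target operands are
  // patched to the relocated PModeEntry/LModeEntry addresses; the byte
  // offsets are assumed to match the instruction encoding produced by the
  // assembly rendezvous code.
  //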

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
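
//
// Note on the copies above: the master copies of the AP GDT, IDT and machine
// check handler live in SMRAM, which APs cannot reach while executing normal
// resume code, so they are staged into ACPI NVS memory that stays valid
// across S3.
//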

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackAddress to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}