/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

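//
// Communication area shared between the BSP and the APs during the S3 boot
// path; PrepareApStartupVector () places it immediately after the AP startup
// code below 1 MB.
//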
typedef struct {
  UINTN            Lock;
  VOID             *StackStart;
  UINTN            StackSize;
  VOID             *ApFunction;
  IA32_DESCRIPTOR  GdtrProfile;
  IA32_DESCRIPTOR  IdtrProfile;
  UINT32           BufferStart;
  UINT32           Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

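//
// Scratch region just below the legacy VGA range at A0000h, usable for code
// that must be reachable from real mode below 1 MB.
//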
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA         mAcpiCpuData;
UINT32                mNumberToFinish;
MP_CPU_EXCHANGE_INFO  *mExchangeInfo;
BOOLEAN               mRestoreSmmConfigurationInS3 = FALSE;
VOID                  *mGdtForAp = NULL;
VOID                  *mIdtForAp = NULL;
VOID                  *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK           *mMsrSpinLocks = NULL;
UINTN                 mMsrSpinLockCount;
UINTN                 mMsrCount = 0;

/**
  Get MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If the MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                        (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
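
//
// Illustrative usage sketch (not part of the original S3 flow): serializing a
// read-modify-write of a package/core-scope MSR with the spin-lock table
// above. The MSR index 0x1A2 and the bit field below are example values only.
//
VOID
ExampleSerializedMsrBitFieldWrite (
  VOID
  )
{
  SPIN_LOCK  *MsrSpinLock;

  //
  // Make sure a lock exists for this MSR, then look it up.
  //
  InitMsrSpinLockByIndex (0x1A2);
  MsrSpinLock = GetMsrSpinLockByIndex (0x1A2);
  ASSERT (MsrSpinLock != NULL);

  //
  // Hold the lock across the non-atomic read-modify-write of bits [0..7].
  //
  AcquireSpinLock (MsrSpinLock);
  AsmMsrBitFieldWrite64 (0x1A2, 0, 7, 0x5A);
  ReleaseSpinLock (MsrSpinLock);
}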

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param RegisterTable  Pointer to the register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse the register table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of the specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is a Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is a Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT
      // signal, there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If the length is not less than 64 bits, write directly without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Acquire the spin lock so that the read-modify-write of package/core
        // scope MSRs stays atomic when processors program them in parallel.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If the value of the entry is 0, disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
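
//
// Illustrative sketch (not part of the original file): the read-modify-write
// pattern SetProcessorRegister () applies to control-register entries, shown
// here for an example entry with Index 4 (CR4), ValidBitStart 9 and
// ValidBitLength 1, i.e. bits [9..9] are written with the value 1.
//
VOID
ExampleWriteCr4BitField (
  VOID
  )
{
  UINTN  Value;

  Value = AsmReadCr4 ();
  Value = (UINTN) BitFieldWrite64 (Value, 9, 9, 1);
  AsmWriteCr4 (Value);
}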

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find the processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count this AP as finished using an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count this AP as finished using an atomic decrement.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares the startup vector for APs.

  This function prepares the startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of the startup code for APs,
  // including code size and offsets of the jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to the startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
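  //
  // The "+ 3" and "+ 2" below skip the prefix and far-jump opcode bytes that
  // the startup stub emits ahead of each 32-bit jump target, so the patched
  // value lands on the instruction's target operand.
  //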
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

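  //
  // Resulting layout of the working buffer:
  //
  //   StartupVector ..................... AP startup code (AddressMap.Size bytes)
  //   StartupVector + AddressMap.Size ... MP_CPU_EXCHANGE_INFO shared with APs
  //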
  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find the processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

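  //
  // Wait until every AP has run EarlyMPRendezvousProcedure () and decremented
  // the counter.
  //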
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE  *RegisterTableList;
  UINT32              InitApicId;
  UINTN               Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when the APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackStart to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
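
//
// Ordering note: on S3 resume, EarlyInitializeCpu () runs first, SMBASE
// relocation follows, and InitializeCpu () then completes the restore using
// the register tables saved during the normal boot path.
//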