]>
Commit | Line | Data |
---|---|---|
529a5a86 MK |
1 | /** @file\r |
2 | Code for Processor S3 restoration\r | |
3 | \r | |
4 | Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r | |
5 | This program and the accompanying materials\r | |
6 | are licensed and made available under the terms and conditions of the BSD License\r | |
7 | which accompanies this distribution. The full text of the license may be found at\r | |
8 | http://opensource.org/licenses/bsd-license.php\r | |
9 | \r | |
10 | THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
11 | WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
12 | \r | |
13 | **/\r | |
14 | \r | |
15 | #include "PiSmmCpuDxeSmm.h"\r | |
16 | \r | |
//
// Data exchanged between the BSP and the APs during S3 AP startup.
// NOTE(review): this structure is placed immediately after the AP startup
// code (see PrepareApStartupVector) and appears to be read by the assembly
// rendezvous code, so the field order/layout must match the assembly side —
// do not reorder fields. TODO confirm against MpFuncs.asm/.nasm.
//
typedef struct {
  UINTN             Lock;          // Spin lock the APs serialize on — presumably taken by the AP startup code; verify in assembly
  VOID              *StackStart;   // Base of the AP stack region (updated by APs as each carves its stack)
  UINTN             StackSize;     // Per-AP stack size in bytes
  VOID              *ApFunction;   // C routine each AP jumps to after mode switch
  IA32_DESCRIPTOR   GdtrProfile;   // GDTR value the APs load
  IA32_DESCRIPTOR   IdtrProfile;   // IDTR value the APs load
  UINT32            BufferStart;   // Physical address of the startup-code buffer (below 1 MB)
  UINT32            Cr3;           // Page-table root for APs entering paged/long mode
} MP_CPU_EXCHANGE_INFO;
27 | \r | |
//
// Address map of the AP rendezvous assembly stub, filled in by
// AsmGetAddressMap(). Offsets are relative to RendezvousFunnelAddress and
// are used to patch the far-jump targets after the stub is copied.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;  // Start of the AP startup (rendezvous) code in this image
  UINTN PModeEntryOffset;          // Offset of the protected-mode entry point
  UINTN FlatJumpOffset;            // Offset of the far jump into protected mode (patched with the copied address)
  UINTN Size;                      // Total size in bytes of the startup code to copy
  UINTN LModeEntryOffset;          // Offset of the long-mode entry point (X64 only — 0 otherwise, presumably)
  UINTN LongJumpOffset;            // Offset of the far jump into long mode (0 when not applicable; see PrepareApStartupVector)
} MP_ASSEMBLY_ADDRESS_MAP;
36 | \r | |
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.

  @return NOTE(review): declared to return VOID* but callers here ignore the
          return value; its meaning is defined by the assembly implementation —
          TODO confirm against the .asm/.nasm source.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );
48 | \r | |
//
// Legacy (below 1 MB) region reserved for the AP startup vector: two 4 KB
// pages ending at 0xA0000 (start of legacy VGA space).
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
//
// Initial capacity (and growth increment) of the MSR spin lock array.
//
#define MSR_SPIN_LOCK_INIT_NUM 15
52 | \r | |
53 | ACPI_CPU_DATA mAcpiCpuData;\r | |
54 | UINT32 mNumberToFinish;\r | |
55 | MP_CPU_EXCHANGE_INFO *mExchangeInfo;\r | |
56 | BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;\r | |
57 | VOID *mGdtForAp = NULL;\r | |
58 | VOID *mIdtForAp = NULL;\r | |
59 | VOID *mMachineCheckHandlerForAp = NULL;\r | |
60 | MP_MSR_LOCK *mMsrSpinLocks = NULL;\r | |
61 | UINTN mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;\r | |
62 | UINTN mMsrCount = 0;\r | |
63 | \r | |
64 | /**\r | |
65 | Get MSR spin lock by MSR index.\r | |
66 | \r | |
67 | @param MsrIndex MSR index value.\r | |
68 | \r | |
69 | @return Pointer to MSR spin lock.\r | |
70 | \r | |
71 | **/\r | |
72 | SPIN_LOCK *\r | |
73 | GetMsrSpinLockByIndex (\r | |
74 | IN UINT32 MsrIndex\r | |
75 | )\r | |
76 | {\r | |
77 | UINTN Index;\r | |
78 | for (Index = 0; Index < mMsrCount; Index++) {\r | |
79 | if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {\r | |
80 | return &mMsrSpinLocks[Index].SpinLock;\r | |
81 | }\r | |
82 | }\r | |
83 | return NULL;\r | |
84 | }\r | |
85 | \r | |
86 | /**\r | |
87 | Initialize MSR spin lock by MSR index.\r | |
88 | \r | |
89 | @param MsrIndex MSR index value.\r | |
90 | \r | |
91 | **/\r | |
92 | VOID\r | |
93 | InitMsrSpinLockByIndex (\r | |
94 | IN UINT32 MsrIndex\r | |
95 | )\r | |
96 | {\r | |
97 | UINTN NewMsrSpinLockCount;\r | |
98 | \r | |
99 | if (mMsrSpinLocks == NULL) {\r | |
100 | mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);\r | |
101 | ASSERT (mMsrSpinLocks != NULL);\r | |
102 | }\r | |
103 | if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {\r | |
104 | //\r | |
105 | // Initialize spin lock for MSR programming\r | |
106 | //\r | |
107 | mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;\r | |
108 | InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);\r | |
109 | mMsrCount ++;\r | |
110 | if (mMsrCount == mMsrSpinLockCount) {\r | |
111 | //\r | |
112 | // If MSR spin lock buffer is full, enlarge it\r | |
113 | //\r | |
114 | NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;\r | |
115 | mMsrSpinLocks = ReallocatePool (\r | |
116 | sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,\r | |
117 | sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,\r | |
118 | mMsrSpinLocks\r | |
119 | );\r | |
120 | mMsrSpinLockCount = NewMsrSpinLockCount;\r | |
121 | }\r | |
122 | }\r | |
123 | }\r | |
124 | \r | |
125 | /**\r | |
126 | Sync up the MTRR values for all processors.\r | |
127 | \r | |
128 | @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r | |
129 | **/\r | |
130 | VOID\r | |
131 | EFIAPI\r | |
132 | LoadMtrrData (\r | |
133 | EFI_PHYSICAL_ADDRESS MtrrTable\r | |
134 | )\r | |
135 | /*++\r | |
136 | \r | |
137 | Routine Description:\r | |
138 | \r | |
139 | Sync up the MTRR values for all processors.\r | |
140 | \r | |
141 | Arguments:\r | |
142 | \r | |
143 | Returns:\r | |
144 | None\r | |
145 | \r | |
146 | --*/\r | |
147 | {\r | |
148 | MTRR_SETTINGS *MtrrSettings;\r | |
149 | \r | |
150 | MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r | |
151 | MtrrSetAllMtrrs (MtrrSettings);\r | |
152 | }\r | |
153 | \r | |
/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor by walking the
  processor's saved register table and applying each entry according to its
  type: control registers (CR0/CR2/CR3/CR4) via bit-field read-modify-write,
  MSRs via direct write or lock-protected bit-field write, and cache control
  via enable/disable of the processor cache.

  @param RegisterTable Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      //
      // Entry Index selects which CRn to modify; only the bit field
      // [ValidBitStart, ValidBitStart + ValidBitLength - 1] is replaced with
      // the entry's Value, preserving the register's other bits.
      //
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        // CR1 does not exist and other indices are not supported; ignore.
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        // NOTE(review): MsrSpinLock is not checked for NULL here — this relies
        // on InitMsrSpinLockByIndex having been called for every MSR in the
        // table during normal boot; TODO confirm.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      // Unrecognized register types are skipped silently.
      break;
    }
  }
}
284 | \r | |
285 | /**\r | |
286 | AP initialization before SMBASE relocation in the S3 boot path.\r | |
287 | **/\r | |
288 | VOID\r | |
289 | EarlyMPRendezvousProcedure (\r | |
290 | VOID\r | |
291 | )\r | |
292 | {\r | |
293 | CPU_REGISTER_TABLE *RegisterTableList;\r | |
294 | UINT32 InitApicId;\r | |
295 | UINTN Index;\r | |
296 | \r | |
297 | LoadMtrrData (mAcpiCpuData.MtrrTable);\r | |
298 | \r | |
299 | //\r | |
300 | // Find processor number for this CPU.\r | |
301 | //\r | |
302 | RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;\r | |
303 | InitApicId = GetInitialApicId ();\r | |
304 | for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r | |
305 | if (RegisterTableList[Index].InitialApicId == InitApicId) {\r | |
306 | SetProcessorRegister (&RegisterTableList[Index]);\r | |
307 | break;\r | |
308 | }\r | |
309 | }\r | |
310 | \r | |
311 | //\r | |
312 | // Count down the number with lock mechanism.\r | |
313 | //\r | |
314 | InterlockedDecrement (&mNumberToFinish);\r | |
315 | }\r | |
316 | \r | |
317 | /**\r | |
318 | AP initialization after SMBASE relocation in the S3 boot path.\r | |
319 | **/\r | |
320 | VOID\r | |
321 | MPRendezvousProcedure (\r | |
322 | VOID\r | |
323 | )\r | |
324 | {\r | |
325 | CPU_REGISTER_TABLE *RegisterTableList;\r | |
326 | UINT32 InitApicId;\r | |
327 | UINTN Index;\r | |
328 | \r | |
329 | ProgramVirtualWireMode ();\r | |
330 | DisableLvtInterrupts ();\r | |
331 | \r | |
332 | RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;\r | |
333 | InitApicId = GetInitialApicId ();\r | |
334 | for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r | |
335 | if (RegisterTableList[Index].InitialApicId == InitApicId) {\r | |
336 | SetProcessorRegister (&RegisterTableList[Index]);\r | |
337 | break;\r | |
338 | }\r | |
339 | }\r | |
340 | \r | |
341 | //\r | |
342 | // Count down the number with lock mechanism.\r | |
343 | //\r | |
344 | InterlockedDecrement (&mNumberToFinish);\r | |
345 | }\r | |
346 | \r | |
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: it copies the AP rendezvous
  assembly stub into the working buffer, patches the far-jump targets inside
  the copied code, and fills in the BSP/AP exchange area (GDT, IDT, stack,
  CR3) placed immediately after the copied code. It also copies the AP GDT,
  IDT and machine-check handler images out of SMRAM into the ACPI NVS memory
  the APs will actually use.

  @param WorkingBuffer  The address of the work buffer (below 1 MB, so a SIPI
                        can point at it).
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // NOTE(review): the "+ 3" / "+ 2" skip the jump opcode bytes to reach the
  // 32-bit target operand inside the instruction — the offsets must stay in
  // sync with the assembly stub's encoding; TODO confirm against the .asm.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    // Long-mode jump exists only in the X64 stub (offset 0 means "absent").
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  // (descriptor Limit is size-1, hence the "+ 1" byte counts)
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
402 | \r | |
403 | /**\r | |
404 | The function is invoked before SMBASE relocation in S3 path to restores CPU status.\r | |
405 | \r | |
406 | The function is invoked before SMBASE relocation in S3 path. It does first time microcode load\r | |
407 | and restores MTRRs for both BSP and APs.\r | |
408 | \r | |
409 | **/\r | |
410 | VOID\r | |
411 | EarlyInitializeCpu (\r | |
412 | VOID\r | |
413 | )\r | |
414 | {\r | |
415 | CPU_REGISTER_TABLE *RegisterTableList;\r | |
416 | UINT32 InitApicId;\r | |
417 | UINTN Index;\r | |
418 | \r | |
419 | LoadMtrrData (mAcpiCpuData.MtrrTable);\r | |
420 | \r | |
421 | //\r | |
422 | // Find processor number for this CPU.\r | |
423 | //\r | |
424 | RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;\r | |
425 | InitApicId = GetInitialApicId ();\r | |
426 | for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r | |
427 | if (RegisterTableList[Index].InitialApicId == InitApicId) {\r | |
428 | SetProcessorRegister (&RegisterTableList[Index]);\r | |
429 | break;\r | |
430 | }\r | |
431 | }\r | |
432 | \r | |
433 | ProgramVirtualWireMode ();\r | |
434 | \r | |
435 | PrepareApStartupVector (mAcpiCpuData.StartupVector);\r | |
436 | \r | |
437 | mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r | |
438 | mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;\r | |
439 | \r | |
440 | //\r | |
441 | // Send INIT IPI - SIPI to all APs\r | |
442 | //\r | |
443 | SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);\r | |
444 | \r | |
445 | while (mNumberToFinish > 0) {\r | |
446 | CpuPause ();\r | |
447 | }\r | |
448 | }\r | |
449 | \r | |
450 | /**\r | |
451 | The function is invoked after SMBASE relocation in S3 path to restores CPU status.\r | |
452 | \r | |
453 | The function is invoked after SMBASE relocation in S3 path. It restores configuration according to\r | |
454 | data saved by normal boot path for both BSP and APs.\r | |
455 | \r | |
456 | **/\r | |
457 | VOID\r | |
458 | InitializeCpu (\r | |
459 | VOID\r | |
460 | )\r | |
461 | {\r | |
462 | CPU_REGISTER_TABLE *RegisterTableList;\r | |
463 | UINT32 InitApicId;\r | |
464 | UINTN Index;\r | |
465 | \r | |
466 | RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;\r | |
467 | InitApicId = GetInitialApicId ();\r | |
468 | for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r | |
469 | if (RegisterTableList[Index].InitialApicId == InitApicId) {\r | |
470 | SetProcessorRegister (&RegisterTableList[Index]);\r | |
471 | break;\r | |
472 | }\r | |
473 | }\r | |
474 | \r | |
475 | mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;\r | |
476 | //\r | |
477 | // StackStart was updated when APs were waken up in EarlyInitializeCpu.\r | |
478 | // Re-initialize StackAddress to original beginning address.\r | |
479 | //\r | |
480 | mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;\r | |
481 | mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;\r | |
482 | \r | |
483 | //\r | |
484 | // Send INIT IPI - SIPI to all APs\r | |
485 | //\r | |
486 | SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);\r | |
487 | \r | |
488 | while (mNumberToFinish > 0) {\r | |
489 | CpuPause ();\r | |
490 | }\r | |
491 | }\r |