/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8   *RendezvousFunnelAddress;
  UINTN   PModeEntryOffset;
  UINTN   FlatJumpOffset;
  UINTN   Size;
  UINTN   LModeEntryOffset;
  UINTN   LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operations
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

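//
// The AP startup code must execute in real mode from memory below 1 MB, so a
// two-page region immediately below the legacy VGA range at 0xA0000 is
// reserved for it.
//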
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
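
//
// Typical lifecycle of an MSR spin lock, as used elsewhere in this file
// (a sketch, not an additional API):
//
//   InitMsrSpinLockByIndex (MsrIndex);        // while copying register tables
//   Lock = GetMsrSpinLockByIndex (MsrIndex);  // on each processor
//   AcquireSpinLock (Lock);
//   AsmMsrBitFieldWrite64 (MsrIndex, Start, End, Value);
//   ReleaseSpinLock (Lock);
//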

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get the lock to avoid Package/Core scope MSR programming issues in parallel
        // execution mode and to make sure the MSR read-modify-write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If the value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
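
//
// Illustrative (hypothetical) register table entry: programming a single
// control register bit such as CR4 bit 9 through SetProcessorRegister()
// above would be described as
//
//   Entry.RegisterType   = ControlRegister;
//   Entry.Index          = 4;    // CR4
//   Entry.ValidBitStart  = 9;
//   Entry.ValidBitLength = 1;
//   Entry.Value          = 1;
//
// which reads CR4, rewrites only bit [9] via BitFieldWrite64(), and writes
// the result back.
//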

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
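  //
  // Patch only the 32-bit target operands of the mode-switch jumps in the
  // copied stub: the "+ 3" and "+ 2" below skip the prefix/opcode bytes of
  // the flat jump and long jump instructions, whose exact byte counts are
  // defined by the encoding in the assembly file behind AsmGetAddressMap().
  //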
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
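
//
// Layout produced by PrepareApStartupVector() (derived from the code above;
// Size is reported by AsmGetAddressMap()):
//
//   StartupVector + 0               : AP rendezvous (startup) code
//   StartupVector + AddressMap.Size : MP_CPU_EXCHANGE_INFO shared with APs
//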

/**
  The function is invoked before SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It performs the
  first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
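
//
// BSP/AP handshake used by EarlyInitializeCpu() above and InitializeCpu()
// below:
//   1. The BSP sets mNumberToFinish to the AP count and publishes the AP entry
//      point through mExchangeInfo->ApFunction.
//   2. The BSP sends an INIT-SIPI-SIPI sequence; each AP runs the published
//      function and decrements mNumberToFinish with InterlockedDecrement().
//   3. The BSP spins with CpuPause() until mNumberToFinish reaches zero.
//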

/**
  The function is invoked after SMBASE relocation in the S3 boot path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It restores
  configuration according to the data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when the APs were woken up in EarlyInitializeCpu();
  // re-initialize it to the original beginning address, mAcpiCpuData.StackAddress.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS             = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point    = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1       = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2       = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer  = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the Debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
    CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
    //
    // Go through all MSRs in the register table to initialize the MSR spin locks
    //
    RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize an MSR spin lock only for MSRs that need bit-field writes
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

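  //
  // One pool buffer holds all three items back to back (layout implied by
  // the pointer arithmetic below):
  //
  //   mGdtForAp                 : Gdtr->Limit + 1 bytes
  //   mIdtForAp                 : Idtr->Limit + 1 bytes
  //   mMachineCheckHandlerForAp : ApMachineCheckHandlerSize bytes
  //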
  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}
885 | }\r |