/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8    *RendezvousFunnelAddress;
  UINTN    PModeEntryOffset;
  UINTN    FlatJumpOffset;
  UINTN    Size;
  UINTN    LModeEntryOffset;
  UINTN    LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;
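
//
// Note: the AP startup (rendezvous) assembly code locates MP_CPU_EXCHANGE_INFO
// immediately after the code it executes (see PrepareApStartupVector() below),
// so the field layout of these two structures is assumed to match that code's
// expectations; keep both sides in sync when changing either.
//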

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
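
//
// Module-global state for the S3 resume flow: CPU configuration captured
// during the normal boot path (see GetAcpiCpuData() later in this file) and
// bookkeeping used while waking and configuring the APs.
//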

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN    Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
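
//
// Usage note: CopyRegisterTable() later in this file pre-initializes a spin
// lock for every MSR entry that requires a bit-field (read-modify-write)
// update, so that SetProcessorRegister() can program package- or core-scope
// MSRs safely while all processors execute their register tables in parallel.
//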

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT
      // signal, there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Acquire a spin lock so that package/core scope MSRs are not programmed
        // in parallel by multiple processors; this keeps the MSR
        // read-modify-write operation atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS       StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP    AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
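  //
  // Patch the 32-bit target operands of the two mode-switch jumps inside the
  // copied startup code.  The small byte offsets (+3 and +2) are assumed to
  // skip the jump opcode (and any prefixes) in the assembly rendezvous code,
  // so that each write lands exactly on the instruction's address operand.
  //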
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path. It
  performs the first-time microcode load and restores MTRRs for both BSP
  and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path. It
  restores configuration according to data saved by the normal boot path for
  both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu;
  // re-initialize StackStart to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    EarlyInitializeCpu ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpu ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }
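
  //
  // Neither hand-off path returns on success: SwitchStack() above jumps to the
  // 32-bit PEI entry point, and the AsmDisablePaging64() path below leaves long
  // mode before jumping.  Execution reaching the dead loop at the bottom of
  // this function therefore means the PEI context could not be resumed.
  //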

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable the Debug Timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE         *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE         *SourceRegisterTableList,
  IN UINT32                     NumberOfCpus
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
    ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
    CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
    //
    // Go through all MSRs in the register table to initialize the MSR spin locks
    //
    RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;
    for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
      if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
        //
        // Initialize an MSR spin lock only for those MSRs that need bit-field writes
        //
        InitMsrSpinLockByIndex (RegisterTableEntry->Index);
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
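
  //
  // Note: a single pool allocation below holds the AP GDT, IDT, and Machine
  // Check handler back-to-back; mIdtForAp and mMachineCheckHandlerForAp are
  // simply offsets into that one buffer.
  //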
  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}