/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

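//
// Exchange data between the BSP and the APs during AP startup on the S3
// path. This structure is placed immediately after the copied AP startup
// code and is read by that code, so its layout must stay in sync with the
// AP rendezvous assembly.
//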
typedef struct {
  UINTN            Lock;
  VOID             *StackStart;
  UINTN            StackSize;
  VOID             *ApFunction;
  IA32_DESCRIPTOR  GdtrProfile;
  IA32_DESCRIPTOR  IdtrProfile;
  UINT32           BufferStart;
  UINT32           Cr3;
} MP_CPU_EXCHANGE_INFO;

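//
// Address map of the AP startup (rendezvous funnel) code, as returned by
// AsmGetAddressMap(). The jump offsets locate the mode-switch far jumps
// that must be patched after the code is copied to its runtime location.
//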
typedef struct {
  UINT8  *RendezvousFunnelAddress;
  UINTN  PModeEntryOffset;
  UINTN  FlatJumpOffset;
  UINTN  Size;
  UINTN  LModeEntryOffset;
  UINTN  LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                    *mMemoryMappedLock = NULL;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN             mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
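
//
// APs start in real mode after INIT-SIPI-SIPI, so the startup vector must
// reside below 1 MB; the legacy region defined above is carved out just
// below the 0xA0000 legacy VGA range for that purpose.
//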

ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

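//
// Machine code the APs execute out of ACPI NVS memory at the end of the S3
// flow: it loads the address of mNumberToFinish (passed as a stack
// argument), lock-decrements it to signal the BSP that this AP has reached
// safe code, then parks in a cli/hlt loop; the jmp $-2 re-enters hlt if
// anything ever wakes the processor.
//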
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,        // lock dec dword ptr [eax]
  0xFA,                    // cli
  0xF4,                    // hlt
  0xEB, 0xFC               // jmp $-2
  };

/**
  Get MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param MsrIndex  MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  )
{
  UINTN  MsrSpinLockCount;
  UINTN  NewMsrSpinLockCount;
  UINTN  Index;
  UINTN  AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param RegisterTables       Pointer to register table of the running processor.
  @param RegisterTableCount   Register table count.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE  *RegisterTables,
  IN UINTN               RegisterTableCount
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
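  //
  // Locate the register table that belongs to this processor by matching
  // its initial local APIC ID.
  //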
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  //
  // Place AP into the safe code; count down the number with the lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
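  //
  // Patch the 32-bit destination operand of each mode-switch far jump in the
  // copied code: AddressMap gives the offset of each jump instruction, and
  // the +3/+2 adjustments below skip past the instruction's opcode/prefix
  // bytes to land on the dword that holds the target address.
  //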
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute the phase before SMM base relocation first. Note: this flag is
  // maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and the APs may continue initialization.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need to make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }
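
  //
  // Note: the SMM_S3_RESUME_SMM_64 signature means PEI ran in 32-bit mode
  // while SMM runs in 64-bit mode, so a temporary X64 IDT with default
  // exception handlers is installed above; the saved IA32 IDT is restored
  // just before AsmDisablePaging64() hands control back to 32-bit PEI below.
  //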

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS            = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point   = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1      = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2      = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob != NULL) {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }
  }

  //
  // Patch SmmS3ResumeState->SmmS3Cr3
  //
  InitSmmS3Cr3 ();

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  UINTN                     Index1;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go through all MSRs in register table to initialize MSR spin lock
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize MSR spin lock only for those MSRs that need bit field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA    *AcpiCpuData;
  IA32_DESCRIPTOR  *Gdtr;
  IA32_DESCRIPTOR  *Idtr;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == NULL) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

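  //
  // mGdtForAp is a single SMRAM buffer that holds the AP's GDT, IDT, and
  // machine check handler back to back; mIdtForAp and
  // mMachineCheckHandlerForAp are simply pointers into that same allocation.
  //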
  mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (mGdtForAp != NULL);
  mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
  mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));

  CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}