]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg: Change use of EFI_D_* to DEBUG_*
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
CommitLineData
529a5a86
MK
1/** @file\r
2Code for Processor S3 restoration\r
3\r
e992cc3f 4Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>\r
0acd8697 5SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
6\r
7**/\r
8\r
9#include "PiSmmCpuDxeSmm.h"\r
10\r
#pragma pack(1)
//
// Data exchanged between the BSP and the APs. PrepareApStartupVector() places
// this structure immediately after the AP rendezvous code at the startup
// vector; it is packed because the assembly stub reads it at fixed offsets.
// (Field layout must match the assembly side — TODO confirm against the
// MP rendezvous .nasm source, which is not visible in this file.)
//
typedef struct {
  UINTN             Lock;                // Lock word (presumably serializes AP access — confirm in assembly stub)
  VOID              *StackStart;         // Base of the AP stack region (from mAcpiCpuData.StackAddress)
  UINTN             StackSize;           // Size of the stack area (from mAcpiCpuData.StackSize)
  VOID              *ApFunction;         // C entry point the AP jumps to (set to InitializeAp)
  IA32_DESCRIPTOR   GdtrProfile;         // GDTR image copied from mAcpiCpuData.GdtrProfile
  IA32_DESCRIPTOR   IdtrProfile;         // IDTR image copied from mAcpiCpuData.IdtrProfile
  UINT32            BufferStart;         // Physical address of the startup vector itself
  UINT32            Cr3;                 // Page-table base loaded by the AP (BSP's CR3 at prepare time)
  UINTN             InitializeFloatingPointUnitsAddress;  // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
529a5a86
MK
24\r
//
// Layout of the AP rendezvous (startup) code, as reported by
// AsmGetAddressMap(). PrepareApStartupVector() copies the code to the
// startup vector and patches the jump targets using these offsets.
//
typedef struct {
  UINT8             *RendezvousFunnelAddress;  // Start of the AP startup code to copy
  UINTN             PModeEntryOffset;          // Offset of the 32-bit protected-mode entry point
  UINTN             FlatJumpOffset;            // Offset of the jump instruction patched to reach PModeEntry
  UINTN             Size;                      // Total size of the rendezvous code in bytes
  UINTN             LModeEntryOffset;          // Offset of the 64-bit long-mode entry point
  UINTN             LongJumpOffset;            // Offset of the jump patched to reach LModeEntry; 0 when absent
} MP_ASSEMBLY_ADDRESS_MAP;
33\r
//
// Flags used when program the register.
// Shared by BSP and APs while ProgramProcessorRegister() walks the
// register tables; the semaphore arrays implement core- and package-level
// barriers for Semaphore-type table entries.
//
typedef struct {
  volatile UINTN           MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32          *CoreSemaphoreCount;     // Semaphore container used to program
                                                    // core level semaphore.
  volatile UINT32          *PackageSemaphoreCount;  // Semaphore container used to program
                                                    // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
6c4c15fa 44\r
//
// Signal that SMM BASE relocation is complete.
// Cleared by InitializeCpuBeforeRebase(), set by InitializeCpuAfterRebase();
// APs spin on it in InitializeAp().
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
ACPI_CPU_DATA                mAcpiCpuData;
//
// Count of APs that have not yet finished the current phase; decremented
// with interlocked operations, polled by the BSP.
//
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

//
// ACPI-NVS copy of the hlt-loop template below; APs are parked here after
// S3 initialization completes (see InitializeAp / TransferApToSafeState).
// The stub decrements the counter whose address is passed on the stack,
// then halts forever.
//
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                                        0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                                        0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                                        0xFA,                    // cli
                                        0xF4,                    // hlt
                                        0xEB, 0xFC               // jmp $-2
                                        };
91\r
529a5a86
MK
92/**\r
93 Sync up the MTRR values for all processors.\r
94\r
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
96**/\r
97VOID\r
98EFIAPI\r
99LoadMtrrData (\r
100 EFI_PHYSICAL_ADDRESS MtrrTable\r
101 )\r
102/*++\r
103\r
104Routine Description:\r
105\r
106 Sync up the MTRR values for all processors.\r
107\r
108Arguments:\r
109\r
110Returns:\r
111 None\r
112\r
113--*/\r
114{\r
115 MTRR_SETTINGS *MtrrSettings;\r
116\r
117 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
118 MtrrSetAllMtrrs (MtrrSettings);\r
119}\r
120\r
/**
  Increment semaphore by 1.

  Atomic V() operation; pairs with S3WaitForSemaphore() executed by
  other logical processors.

  @param      Sem   IN:  32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  InterlockedIncrement (Sem);
}
134\r
135/**\r
136 Decrement the semaphore by 1 if it is not zero.\r
529a5a86 137\r
93324390
ED
138 Performs an atomic decrement operation for semaphore.\r
139 The compare exchange operation must be performed using\r
140 MP safe mechanisms.\r
141\r
142 @param Sem IN: 32-bit unsigned integer\r
143\r
144**/\r
145VOID\r
146S3WaitForSemaphore (\r
147 IN OUT volatile UINT32 *Sem\r
148 )\r
149{\r
150 UINT32 Value;\r
151\r
152 do {\r
153 Value = *Sem;\r
154 } while (Value == 0 ||\r
155 InterlockedCompareExchange32 (\r
156 Sem,\r
157 Value,\r
158 Value - 1\r
159 ) != Value);\r
160}\r
161\r
ef21a304
ED
162/**\r
163 Read / write CR value.\r
164\r
165 @param[in] CrIndex The CR index which need to read/write.\r
166 @param[in] Read Read or write. TRUE is read.\r
167 @param[in,out] CrValue CR value.\r
168\r
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.\r
170**/\r
171UINTN\r
172ReadWriteCr (\r
173 IN UINT32 CrIndex,\r
174 IN BOOLEAN Read,\r
175 IN OUT UINTN *CrValue\r
176 )\r
177{\r
178 switch (CrIndex) {\r
179 case 0:\r
180 if (Read) {\r
181 *CrValue = AsmReadCr0 ();\r
182 } else {\r
183 AsmWriteCr0 (*CrValue);\r
184 }\r
185 break;\r
186 case 2:\r
187 if (Read) {\r
188 *CrValue = AsmReadCr2 ();\r
189 } else {\r
190 AsmWriteCr2 (*CrValue);\r
191 }\r
192 break;\r
193 case 3:\r
194 if (Read) {\r
195 *CrValue = AsmReadCr3 ();\r
196 } else {\r
197 AsmWriteCr3 (*CrValue);\r
198 }\r
199 break;\r
200 case 4:\r
201 if (Read) {\r
202 *CrValue = AsmReadCr4 ();\r
203 } else {\r
204 AsmWriteCr4 (*CrValue);\r
205 }\r
206 break;\r
207 default:\r
208 return EFI_UNSUPPORTED;;\r
209 }\r
210\r
211 return EFI_SUCCESS;\r
212}\r
213\r
93324390
ED
/**
  Initialize the CPU registers from a register table.

  Walks every entry in the table and applies it according to its type:
  control register, MSR, memory-mapped register, cache control, or a
  semaphore barrier that synchronizes threads at core or package scope.

  @param[in]  RegisterTable         The register table for this AP.
  @param[in]  ApLocation            AP location info for this ap.
  @param[in]  CpuStatus             CPU status info for this CPU.
  @param[in]  CpuFlags              Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        //
        // Unsupported CR index: skip this entry rather than fault.
        //
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        //
        // TestThenWrite: skip the write when the target bit-field already
        // holds the desired value (avoids an unnecessary CR write).
        //
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        //
        // As above: read first and skip the write when the field (or the
        // whole MSR for >=64-bit entries) already matches.
        //
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes are serialized across processors with a spinlock;
      // the 64-bit address is assembled from Index (low) and HighIndex (high).
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ThreadCountPerPackage != 0) &&
        (CpuStatus->ThreadCountPerCore != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

        CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;

        //
        // Different cores may have different valid threads in them. If driver maintain clearly
        // thread index in different cores, the logic will be much complicated.
        // Here driver just simply records the max thread number in all cores and use it as expect
        // thread number for all cores.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current core. Maybe some threads are not valid in this core, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current core. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADs in current Core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all VALID THREADs (not all threads) in current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

        //
        // Different packages may have different valid threads in them. If driver maintain clearly
        // thread index in different packages, the logic will be much complicated.
        // Here driver just simply records the max thread number in all packages and use it as expect
        // thread number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify ALL THREADS in current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether VALID THREADS (not all threads) in current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
466\r
93324390
ED
467/**\r
468\r
469 Set Processor register for one AP.\r
e07e3ceb 470\r
93324390
ED
471 @param PreSmmRegisterTable Use pre Smm register table or register table.\r
472\r
473**/\r
474VOID\r
475SetRegister (\r
476 IN BOOLEAN PreSmmRegisterTable\r
477 )\r
478{\r
010753b7 479 CPU_FEATURE_INIT_DATA *FeatureInitData;\r
93324390
ED
480 CPU_REGISTER_TABLE *RegisterTable;\r
481 CPU_REGISTER_TABLE *RegisterTables;\r
482 UINT32 InitApicId;\r
483 UINTN ProcIndex;\r
484 UINTN Index;\r
485\r
010753b7
LY
486 FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;\r
487\r
93324390 488 if (PreSmmRegisterTable) {\r
010753b7 489 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;\r
93324390 490 } else {\r
010753b7 491 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;\r
93324390 492 }\r
e992cc3f
SZ
493 if (RegisterTables == NULL) {\r
494 return;\r
495 }\r
93324390
ED
496\r
497 InitApicId = GetInitialApicId ();\r
498 RegisterTable = NULL;\r
7db4034f 499 ProcIndex = (UINTN)-1;\r
93324390
ED
500 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
501 if (RegisterTables[Index].InitialApicId == InitApicId) {\r
502 RegisterTable = &RegisterTables[Index];\r
503 ProcIndex = Index;\r
504 break;\r
505 }\r
506 }\r
507 ASSERT (RegisterTable != NULL);\r
508\r
010753b7 509 if (FeatureInitData->ApLocation != 0) {\r
93324390
ED
510 ProgramProcessorRegister (\r
511 RegisterTable,\r
010753b7
LY
512 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,\r
513 &FeatureInitData->CpuStatus,\r
93324390
ED
514 &mCpuFlags\r
515 );\r
516 } else {\r
517 ProgramProcessorRegister (\r
518 RegisterTable,\r
519 NULL,\r
010753b7 520 &FeatureInitData->CpuStatus,\r
93324390
ED
521 &mCpuFlags\r
522 );\r
523 }\r
524}\r
525\r
529a5a86 526/**\r
7677b4db 527 AP initialization before then after SMBASE relocation in the S3 boot path.\r
529a5a86
MK
528**/\r
529VOID\r
94744aa2 530InitializeAp (\r
529a5a86
MK
531 VOID\r
532 )\r
533{\r
7677b4db
ED
534 UINTN TopOfStack;\r
535 UINT8 Stack[128];\r
529a5a86
MK
536\r
537 LoadMtrrData (mAcpiCpuData.MtrrTable);\r
538\r
93324390 539 SetRegister (TRUE);\r
7677b4db 540\r
529a5a86
MK
541 //\r
542 // Count down the number with lock mechanism.\r
543 //\r
544 InterlockedDecrement (&mNumberToFinish);\r
529a5a86 545\r
7677b4db
ED
546 //\r
547 // Wait for BSP to signal SMM Base relocation done.\r
548 //\r
549 while (!mInitApsAfterSmmBaseReloc) {\r
550 CpuPause ();\r
551 }\r
529a5a86
MK
552\r
553 ProgramVirtualWireMode ();\r
554 DisableLvtInterrupts ();\r
555\r
93324390 556 SetRegister (FALSE);\r
529a5a86
MK
557\r
558 //\r
ec8a3877 559 // Place AP into the safe code, count down the number with lock mechanism in the safe code.\r
4a0f88dd 560 //\r
672b80c8
MK
561 TopOfStack = (UINTN) Stack + sizeof (Stack);\r
562 TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
4a0f88dd 563 CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
672b80c8 564 TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
529a5a86
MK
565}\r
566\r
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  Copies the AP rendezvous code to WorkingBuffer, patches its mode-switch
  jump targets, and fills in the BSP/AP exchange area that immediately
  follows the code.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The +3 / +2 byte offsets skip the instruction's opcode/prefix bytes so
  // the 32-bit jump target operand is patched in place — these must match
  // the assembly stub's encoding (NOTE(review): confirm against the MP
  // rendezvous assembly source, not visible here).
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
616\r
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  // BSP side: restore MTRRs and pre-SMM register table.
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  // Stage the AP rendezvous code and exchange area at the startup vector.
  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  // All processors except the BSP must report in.
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  // Wait until every AP has decremented the counter in InitializeAp().
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
659\r
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  // APs decrement this from the hlt-loop stub once they are parked.
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
0bdc9e75
SZ
699\r
700/**\r
701 Restore SMM Configuration in S3 boot path.\r
702\r
703**/\r
704VOID\r
705RestoreSmmConfigurationInS3 (\r
706 VOID\r
707 )\r
708{\r
b10d5ddc
SZ
709 if (!mAcpiS3Enable) {\r
710 return;\r
711 }\r
712\r
0bdc9e75
SZ
713 //\r
714 // Restore SMM Configuration in S3 boot path.\r
715 //\r
716 if (mRestoreSmmConfigurationInS3) {\r
717 //\r
718 // Need make sure gSmst is correct because below function may use them.\r
719 //\r
720 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
721 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
722 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
723 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
724 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
725\r
726 //\r
727 // Configure SMM Code Access Check feature if available.\r
728 //\r
729 ConfigSmmCodeAccessCheck ();\r
730\r
731 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
732\r
733 mRestoreSmmConfigurationInS3 = FALSE;\r
734 }\r
735}\r
736\r
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Never returns: it either switches/jumps back to the PEI S3 resume entry
  point or dead-loops when no resume context exists.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    // PEI ran in 32-bit mode; keep its IDTR so it can be restored before
    // dropping back out of long mode at the end of this function.
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
873\r
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Builds the SMM_S3_RESUME_STATE in SMRAM described by the
  gEfiAcpiVariableGuid HOB, and allocates the ACPI-NVS page that holds the
  AP hlt-loop stub used on the S3 path. No-op when ACPI S3 is disabled.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB the S3 resume context cannot be built; halt rather
    // than continue with a platform that cannot resume.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    // The resume-state structure lives at the start of the SMRAM region.
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    // A failed stack allocation is recorded as size 0 rather than fatal.
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    // Signature records whether SMM runs in 32- or 64-bit mode;
    // SmmRestoreCpu() selects its return mechanism from it.
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  // Must be below 4GB so 32-bit APs can reach it.
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
955\r
956/**\r
e992cc3f 957 Copy register table from non-SMRAM into SMRAM.\r
0bdc9e75
SZ
958\r
959 @param[in] DestinationRegisterTableList Points to destination register table.\r
960 @param[in] SourceRegisterTableList Points to source register table.\r
961 @param[in] NumberOfCpus Number of CPUs.\r
962\r
963**/\r
964VOID\r
965CopyRegisterTable (\r
966 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
967 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
968 IN UINT32 NumberOfCpus\r
969 )\r
970{\r
971 UINTN Index;\r
0bdc9e75
SZ
972 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
973\r
974 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
975 for (Index = 0; Index < NumberOfCpus; Index++) {\r
e992cc3f
SZ
976 if (DestinationRegisterTableList[Index].TableLength != 0) {\r
977 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);\r
30d995ee
JF
978 RegisterTableEntry = AllocateCopyPool (\r
979 DestinationRegisterTableList[Index].AllocatedSize,\r
980 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
981 );\r
982 ASSERT (RegisterTableEntry != NULL);\r
983 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
0bdc9e75
SZ
984 }\r
985 }\r
986}\r
987\r
e992cc3f
SZ
988/**\r
989 Check whether the register table is empty or not.\r
990\r
991 @param[in] RegisterTable Point to the register table.\r
992 @param[in] NumberOfCpus Number of CPUs.\r
993\r
994 @retval TRUE The register table is empty.\r
995 @retval FALSE The register table is not empty.\r
996**/\r
997BOOLEAN\r
998IsRegisterTableEmpty (\r
999 IN CPU_REGISTER_TABLE *RegisterTable,\r
1000 IN UINT32 NumberOfCpus\r
1001 )\r
1002{\r
1003 UINTN Index;\r
1004\r
1005 if (RegisterTable != NULL) {\r
1006 for (Index = 0; Index < NumberOfCpus; Index++) {\r
1007 if (RegisterTable[Index].TableLength != 0) {\r
1008 return FALSE;\r
1009 }\r
1010 }\r
1011 }\r
1012\r
1013 return TRUE;\r
1014}\r
1015\r
010753b7
LY
1016/**\r
1017 Copy the data used to initialize processor register into SMRAM.\r
1018\r
1019 @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.\r
1020 @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.\r
1021\r
1022**/\r
1023VOID\r
1024CopyCpuFeatureInitDatatoSmram (\r
1025 IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,\r
1026 IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc\r
1027 )\r
1028{\r
1029 CPU_STATUS_INFORMATION *CpuStatus;\r
1030\r
1031 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {\r
1032 CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
1033 ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);\r
1034\r
1035 CopyRegisterTable (\r
1036 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,\r
1037 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,\r
1038 mAcpiCpuData.NumberOfCpus\r
1039 );\r
1040 }\r
1041\r
1042 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {\r
1043 CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
1044 ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);\r
1045\r
1046 CopyRegisterTable (\r
1047 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,\r
1048 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,\r
1049 mAcpiCpuData.NumberOfCpus\r
1050 );\r
1051 }\r
1052\r
1053 CpuStatus = &CpuFeatureInitDataDst->CpuStatus;\r
1054 CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));\r
1055\r
1056 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {\r
1057 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
1058 sizeof (UINT32) * CpuStatus->PackageCount,\r
1059 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage\r
1060 );\r
1061 ASSERT (CpuStatus->ThreadCountPerPackage != 0);\r
1062 }\r
1063\r
1064 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {\r
1065 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
1066 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),\r
1067 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore\r
1068 );\r
1069 ASSERT (CpuStatus->ThreadCountPerCore != 0);\r
1070 }\r
1071\r
1072 if (CpuFeatureInitDataSrc->ApLocation != 0) {\r
1073 CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
1074 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
1075 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation\r
1076 );\r
1077 ASSERT (CpuFeatureInitDataDst->ApLocation != 0);\r
1078 }\r
1079}\r
1080\r
0bdc9e75
SZ
/**
  Get ACPI CPU data.

  Copies the platform's ACPI_CPU_DATA (published via PcdCpuS3DataAddress)
  into the module-global mAcpiCpuData so it can be consumed during S3
  resume: the MTRR settings, GDTR/IDTR profiles, the AP GDT/IDT and machine
  check handler, and (optionally) the CPU feature initialization data are
  all duplicated with SMM pool allocations. Also sets up the semaphore
  containers in mCpuFlags. Does nothing when ACPI S3 is disabled or the PCD
  was never set.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  // No S3 resume path will run; leave mAcpiCpuData untouched.
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  // Deep-copy the MTRR settings; the header copy above only captured the pointer.
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  // Deep-copy the GDTR profile (descriptor struct only; the descriptor's
  // Base still points at the original table until it is rebased below).
  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  // Same for the IDTR profile.
  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  // Single allocation laid out as [GDT | IDT | MC handler]; a descriptor
  // Limit is size-1, hence the +1 on each piece.
  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  // Rebase the descriptors and the MC handler to the SMRAM copies. This
  // must happen only after the CopyMem calls above, which read the old Base.
  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  // Clear the stale pointers copied from the source structure.
  ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));

  if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
    //
    // If the CPU features will not be initialized by CpuFeaturesPei module during
    // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
    // which will be consumed in SmmRestoreCpu during next S3 resume.
    //
    CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);

    CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;

    // One semaphore slot per logical processor (package * core * thread),
    // used for core-level synchronization when programming registers.
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);

    // Same sizing for the package-level semaphore container.
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);

    // Spinlock serializing MMIO register programming across processors.
    InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
  }
}
b10d5ddc
SZ
1181\r
/**
  Get ACPI S3 enable flag.

  Caches PcdAcpiS3Enable into the module-global mAcpiS3Enable, which gates
  the S3-related setup performed elsewhere in this driver (e.g. GetAcpiCpuData).

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}