]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg/PiSmmCpu: Add Shadow Stack Support for X86 SMM.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
CommitLineData
529a5a86
MK
1/** @file\r
2Code for Processor S3 restoration\r
3\r
e21e355e 4Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
529a5a86
MK
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
#pragma pack(1)
//
// Data area exchanged between the BSP and the APs during S3 AP startup.
// Filled in by PrepareApStartupVector() and consumed by the AP rendezvous
// assembly code, hence the byte packing.
// NOTE(review): field order is assumed to be fixed by the startup assembly
// (MpFuncs.nasm) -- confirm before reordering or resizing any member.
//
typedef struct {
  UINTN             Lock;                                 // Lock word (not referenced in this file)
  VOID              *StackStart;                          // AP stack area base (from mAcpiCpuData.StackAddress)
  UINTN             StackSize;                            // Per-AP stack size (from mAcpiCpuData.StackSize)
  VOID              *ApFunction;                          // C entry each AP jumps to (set to InitializeAp)
  IA32_DESCRIPTOR   GdtrProfile;                          // GDT descriptor the AP loads
  IA32_DESCRIPTOR   IdtrProfile;                          // IDT descriptor the AP loads
  UINT32            BufferStart;                          // Physical address of the startup vector buffer
  UINT32            Cr3;                                  // Page-table base for the AP (copied from BSP CR3)
  UINTN             InitializeFloatingPointUnitsAddress;  // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
529a5a86
MK
30\r
//
// Address/offset information about the AP rendezvous assembly code, as
// reported by AsmGetAddressMap().  PrepareApStartupVector() copies the code
// to the startup vector and uses these offsets to patch its mode-switch
// jump targets.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;   // Start of the AP startup (rendezvous) code
  UINTN PModeEntryOffset;           // Offset of the protected-mode entry point
  UINTN FlatJumpOffset;             // Offset of the jump patched to reach the protected-mode entry
  UINTN Size;                       // Total size of the rendezvous code in bytes
  UINTN LModeEntryOffset;           // Offset of the long-mode entry point
  UINTN LongJumpOffset;             // Offset of the jump patched to reach the long-mode entry (0 if absent)
} MP_ASSEMBLY_ADDRESS_MAP;
39\r
//
// Flags used when program the register.
// Shared by BSP and APs inside ProgramProcessorRegister(): the spinlocks
// serialize console output and MMIO programming, while the semaphore
// containers implement core/package level rendezvous for Semaphore entries.
//
typedef struct {
  volatile UINTN           ConsoleLogLock;          // Spinlock used to control console.
  volatile UINTN           MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32          *CoreSemaphoreCount;     // Semaphore container used to program
                                                    // core level semaphore.
  volatile UINT32          *PackageSemaphoreCount;  // Semaphore container used to program
                                                    // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
6c4c15fa 51\r
7677b4db
ED
//
// Signal that SMM BASE relocation is complete.
// Written by the BSP (cleared in InitializeCpuBeforeRebase, set in
// InitializeCpuAfterRebase) and polled by each AP in InitializeAp();
// volatile because it is shared across processors without a lock.
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

//
// Legacy region reserved below the VGA window (0xA0000) for AP startup code.
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
ACPI_CPU_DATA                mAcpiCpuData;
//
// Number of APs that still must check in; decremented atomically by each AP
// and polled by the BSP.
//
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

//
// Buffer in ACPI NVS memory below 4GB (allocated in InitSmmS3ResumeState)
// that receives a copy of mApHltLoopCodeTemplate; APs are parked there at
// the end of S3 AP initialization by TransferApToSafeState().
//
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,   // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,         // lock dec  dword ptr [eax]
                               0xFA,                     // cli
                               0xF4,                     // hlt
                               0xEB, 0xFC                // jmp $-2
                               };

//
// Printable names for REGISTER_TYPE values; indexed (clamped to InvalidReg)
// by the debug trace in ProgramProcessorRegister().
//
CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };
529a5a86
MK
100\r
101/**\r
102 Sync up the MTRR values for all processors.\r
103\r
104 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
105**/\r
106VOID\r
107EFIAPI\r
108LoadMtrrData (\r
109 EFI_PHYSICAL_ADDRESS MtrrTable\r
110 )\r
111/*++\r
112\r
113Routine Description:\r
114\r
115 Sync up the MTRR values for all processors.\r
116\r
117Arguments:\r
118\r
119Returns:\r
120 None\r
121\r
122--*/\r
123{\r
124 MTRR_SETTINGS *MtrrSettings;\r
125\r
126 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
127 MtrrSetAllMtrrs (MtrrSettings);\r
128}\r
129\r
/**
  Increment semaphore by 1.

  Release (V) operation for the core/package rendezvous used by
  ProgramProcessorRegister(); uses an interlocked increment so it is safe
  to call concurrently from BSP and APs.

  @param Sem IN: 32-bit unsigned integer
**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  InterlockedIncrement (Sem);
}
143\r
144/**\r
145 Decrement the semaphore by 1 if it is not zero.\r
529a5a86 146\r
93324390
ED
147 Performs an atomic decrement operation for semaphore.\r
148 The compare exchange operation must be performed using\r
149 MP safe mechanisms.\r
150\r
151 @param Sem IN: 32-bit unsigned integer\r
152\r
153**/\r
154VOID\r
155S3WaitForSemaphore (\r
156 IN OUT volatile UINT32 *Sem\r
157 )\r
158{\r
159 UINT32 Value;\r
160\r
161 do {\r
162 Value = *Sem;\r
163 } while (Value == 0 ||\r
164 InterlockedCompareExchange32 (\r
165 Sem,\r
166 Value,\r
167 Value - 1\r
168 ) != Value);\r
169}\r
170\r
/**
  Initialize the CPU registers from a register table.

  Walks every entry of RegisterTable in order.  An entry may program a
  control register, an MSR, an MMIO register, the cache enable state, or act
  as a core/package scope synchronization point (Semaphore entries), which
  is why entry order matters and why this routine may run on BSP and APs
  concurrently.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ThreadIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Debug-build trace of each entry.  Console output is serialized with
    // ConsoleLogLock because this code runs on multiple processors at once.
    //
    DEBUG_CODE_BEGIN ();
      if (ApLocation != NULL) {
        AcquireSpinLock (&CpuFlags->ConsoleLogLock);
        // Flatten (Package, Core, Thread) into a linear processor index.
        ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
              ApLocation->Core * CpuStatus->MaxThreadCount +
              ApLocation->Thread;
        DEBUG ((
          DEBUG_INFO,
          "Processor = %lu, Entry Index %lu, Type = %s!\n",
          (UINT64)ThreadIndex,
          (UINT64)Index,
          mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
          ));
        ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
      }
    DEBUG_CODE_END ();

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        //
        // Read-modify-write only the bit field
        // [ValidBitStart, ValidBitStart + ValidBitLength - 1] of CR0.
        //
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        // Only CR0/CR2/CR3/CR4 are supported; other indexes are ignored.
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO programming is serialized across processors with
      // MemoryMappedLock.  The 64-bit MMIO address is split across
      // Index (low 32 bits) and HighIndex (high 32 bits).
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If driver maintail clearly
        // cores number in different packages, the logic will be much complicated.
        // Here driver just simply records the max core number in all packages and use it as expect
        // core number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
432\r
93324390
ED
433/**\r
434\r
435 Set Processor register for one AP.\r
e07e3ceb 436\r
93324390
ED
437 @param PreSmmRegisterTable Use pre Smm register table or register table.\r
438\r
439**/\r
440VOID\r
441SetRegister (\r
442 IN BOOLEAN PreSmmRegisterTable\r
443 )\r
444{\r
445 CPU_REGISTER_TABLE *RegisterTable;\r
446 CPU_REGISTER_TABLE *RegisterTables;\r
447 UINT32 InitApicId;\r
448 UINTN ProcIndex;\r
449 UINTN Index;\r
450\r
451 if (PreSmmRegisterTable) {\r
452 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;\r
453 } else {\r
454 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;\r
455 }\r
456\r
457 InitApicId = GetInitialApicId ();\r
458 RegisterTable = NULL;\r
7db4034f 459 ProcIndex = (UINTN)-1;\r
93324390
ED
460 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
461 if (RegisterTables[Index].InitialApicId == InitApicId) {\r
462 RegisterTable = &RegisterTables[Index];\r
463 ProcIndex = Index;\r
464 break;\r
465 }\r
466 }\r
467 ASSERT (RegisterTable != NULL);\r
468\r
469 if (mAcpiCpuData.ApLocation != 0) {\r
470 ProgramProcessorRegister (\r
471 RegisterTable,\r
472 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,\r
473 &mAcpiCpuData.CpuStatus,\r
474 &mCpuFlags\r
475 );\r
476 } else {\r
477 ProgramProcessorRegister (\r
478 RegisterTable,\r
479 NULL,\r
480 &mAcpiCpuData.CpuStatus,\r
481 &mCpuFlags\r
482 );\r
483 }\r
484}\r
485\r
529a5a86 486/**\r
7677b4db 487 AP initialization before then after SMBASE relocation in the S3 boot path.\r
529a5a86
MK
488**/\r
489VOID\r
94744aa2 490InitializeAp (\r
529a5a86
MK
491 VOID\r
492 )\r
493{\r
7677b4db
ED
494 UINTN TopOfStack;\r
495 UINT8 Stack[128];\r
529a5a86
MK
496\r
497 LoadMtrrData (mAcpiCpuData.MtrrTable);\r
498\r
93324390 499 SetRegister (TRUE);\r
7677b4db 500\r
529a5a86
MK
501 //\r
502 // Count down the number with lock mechanism.\r
503 //\r
504 InterlockedDecrement (&mNumberToFinish);\r
529a5a86 505\r
7677b4db
ED
506 //\r
507 // Wait for BSP to signal SMM Base relocation done.\r
508 //\r
509 while (!mInitApsAfterSmmBaseReloc) {\r
510 CpuPause ();\r
511 }\r
529a5a86
MK
512\r
513 ProgramVirtualWireMode ();\r
514 DisableLvtInterrupts ();\r
515\r
93324390 516 SetRegister (FALSE);\r
529a5a86
MK
517\r
518 //\r
ec8a3877 519 // Place AP into the safe code, count down the number with lock mechanism in the safe code.\r
4a0f88dd 520 //\r
672b80c8
MK
521 TopOfStack = (UINTN) Stack + sizeof (Stack);\r
522 TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
4a0f88dd 523 CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
672b80c8 524 TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
529a5a86
MK
525}\r
526\r
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  Copies the AP rendezvous code to WorkingBuffer, patches its mode-switch
  jump targets, and fills in the BSP/AP exchange area that immediately
  follows the code.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The "+ 3" / "+ 2" skip the jump opcode bytes so the 32-bit target
  // operand is overwritten in place.  A LongJumpOffset of 0 means the
  // rendezvous code has no long-mode jump to patch.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
576\r
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  // Program the pre-SMM-init register table on the BSP.
  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // The BSP itself is excluded from the check-in count.
  //
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has decremented mNumberToFinish in InitializeAp().
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
614\r
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  // Re-arm the AP check-in count; the parked APs decrement it again from
  // the safe hlt-loop code (see InitializeAp/TransferApToSafeState).
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
0bdc9e75
SZ
649\r
650/**\r
651 Restore SMM Configuration in S3 boot path.\r
652\r
653**/\r
654VOID\r
655RestoreSmmConfigurationInS3 (\r
656 VOID\r
657 )\r
658{\r
b10d5ddc
SZ
659 if (!mAcpiS3Enable) {\r
660 return;\r
661 }\r
662\r
0bdc9e75
SZ
663 //\r
664 // Restore SMM Configuration in S3 boot path.\r
665 //\r
666 if (mRestoreSmmConfigurationInS3) {\r
667 //\r
668 // Need make sure gSmst is correct because below function may use them.\r
669 //\r
670 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
671 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
672 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
673 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
674 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
675\r
676 //\r
677 // Configure SMM Code Access Check feature if available.\r
678 //\r
679 ConfigSmmCodeAccessCheck ();\r
680\r
681 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
682\r
683 mRestoreSmmConfigurationInS3 = FALSE;\r
684 }\r
685}\r
686\r
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  This routine does not return through the normal call path: it hands
  control back to the PEI S3 resume entry point via SwitchStack() (32-bit
  SMM) or AsmDisablePaging64() (64-bit SMM), and dead-loops if neither
  transfer is possible.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  // Mark that the driver is executing the S3 resume path.
  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
823\r
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the ACPI variable HOB, builds the SMM_S3_RESUME_STATE structure in
  SMRAM (entry point, stack, CR0/CR3/CR4, bitness signature), and allocates
  the below-4GB ACPI NVS buffer that holds the AP hlt-loop code.  Dead-loops
  if the HOB required for S3 resume is missing.

  @param[in] Cr3 The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume-state structure lives at the start of the SMRAM region
    // described by the HOB.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    //
    // A failed stack allocation is recorded as size 0 rather than treated
    // as fatal here.
    //
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Record whether this SMM image is 64-bit or 32-bit.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
905\r
906/**\r
907 Copy register table from ACPI NVS memory into SMRAM.\r
908\r
909 @param[in] DestinationRegisterTableList Points to destination register table.\r
910 @param[in] SourceRegisterTableList Points to source register table.\r
911 @param[in] NumberOfCpus Number of CPUs.\r
912\r
913**/\r
914VOID\r
915CopyRegisterTable (\r
916 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
917 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
918 IN UINT32 NumberOfCpus\r
919 )\r
920{\r
921 UINTN Index;\r
0bdc9e75
SZ
922 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
923\r
924 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
925 for (Index = 0; Index < NumberOfCpus; Index++) {\r
30d995ee
JF
926 if (DestinationRegisterTableList[Index].AllocatedSize != 0) {\r
927 RegisterTableEntry = AllocateCopyPool (\r
928 DestinationRegisterTableList[Index].AllocatedSize,\r
929 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
930 );\r
931 ASSERT (RegisterTableEntry != NULL);\r
932 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
0bdc9e75
SZ
933 }\r
934 }\r
935}\r
936\r
/**
  Copy the CPU S3 data published by the DXE CPU driver into SMRAM.

  The ACPI_CPU_DATA structure referenced by PcdCpuS3DataAddress lives
  outside SMRAM, so every table it points to (MTRR settings, GDTR/IDTR
  profiles, register tables, AP GDT/IDT images, machine check handler,
  CPU status and AP location info) is deep-copied into pool buffers
  allocated here.  NOTE(review): the pool allocations are presumably
  SMRAM-backed because this module runs in SMM — confirm against the
  module's MemoryAllocationLib instance.

  On any early return, mAcpiCpuData.NumberOfCpus remains 0, which
  prevents the rest of the driver from using the (uninitialized) data.
**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;
  VOID                       *GdtForAp;
  VOID                       *IdtForAp;
  VOID                       *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION     *CpuStatus;

  //
  // Nothing to capture if the platform does not support ACPI S3.
  //
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  // This is a shallow copy; every embedded pointer is replaced below with a
  // freshly allocated deep copy.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  //
  // Deep-copy the MTRR settings table.
  //
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  //
  // Deep-copy the GDTR and IDTR descriptors (their Base fields are rebased
  // further down once the AP copies of the tables themselves exist).
  //
  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Deep-copy both per-CPU register tables (pre-SMM-init and post-init).
  // CopyRegisterTable duplicates each CPU's entry buffer as well.
  //
  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  // A single pool buffer holds all three, laid out back-to-back:
  // [GDT | IDT | machine check handler].
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  //
  // Rebase the descriptors and handler address to the new copies, so S3
  // resume never dereferences the original buffers outside SMRAM.
  //
  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  //
  // Deep-copy the CPU topology/status information and, if present, the
  // per-package valid-core counts and per-CPU physical locations.
  //
  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    //
    // Allocate one zero-initialized UINT32 semaphore slot per logical
    // processor (package * core * thread) for core- and package-level
    // synchronization while programming registers.
    //
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  //
  // Initialize the spinlocks guarding MMIO programming and console output.
  //
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.ConsoleLogLock);
}
b10d5ddc
SZ
1059\r
1060/**\r
1061 Get ACPI S3 enable flag.\r
1062\r
1063**/\r
1064VOID\r
1065GetAcpiS3EnableFlag (\r
1066 VOID\r
1067 )\r
1068{\r
1069 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);\r
1070}\r