]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg/PiSmmCpuDxeSmm: Combine CR read/write action.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
CommitLineData
529a5a86
MK
1/** @file\r
2Code for Processor S3 restoration\r
3\r
02031cfc 4Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>\r
0acd8697 5SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
6\r
7**/\r
8\r
9#include "PiSmmCpuDxeSmm.h"\r
10\r
//
// Data exchanged between the BSP and the APs through the AP startup vector.
// #pragma pack(1) because this layout must match the field offsets used by
// the assembly rendezvous code (AsmGetAddressMap) byte-for-byte.
//
#pragma pack(1)
typedef struct {
  UINTN             Lock;                                // Spin lock taken by each AP on entry
  VOID              *StackStart;                         // Base of the common AP stack area
  UINTN             StackSize;                           // Per-AP stack size carved from StackStart
  VOID              *ApFunction;                         // C function each AP jumps to after mode switch
  IA32_DESCRIPTOR   GdtrProfile;                         // GDTR to load on the AP
  IA32_DESCRIPTOR   IdtrProfile;                         // IDTR to load on the AP
  UINT32            BufferStart;                         // Physical address of the startup vector buffer
  UINT32            Cr3;                                 // Page table root for APs entering paged mode
  UINTN             InitializeFloatingPointUnitsAddress; // Address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
529a5a86
MK
24\r
//
// Address map of the assembly rendezvous code, filled in by AsmGetAddressMap().
// The offsets locate the jump instructions that must be patched with the
// relocated addresses after the code is copied below 1MB.
//
typedef struct {
  UINT8 *RendezvousFunnelAddress;   // Start of the AP rendezvous code to copy
  UINTN PModeEntryOffset;           // Offset of the protected-mode entry point
  UINTN FlatJumpOffset;             // Offset of the far jump into protected mode
  UINTN Size;                       // Total size of the rendezvous code
  UINTN LModeEntryOffset;           // Offset of the long-mode entry point
  UINTN LongJumpOffset;             // Offset of the far jump into long mode (0 if N/A)
} MP_ASSEMBLY_ADDRESS_MAP;
//
// Flags used when program the register.
//
typedef struct {
  volatile UINTN           MemoryMappedLock;        // Spinlock used to program mmio
  volatile UINT32          *CoreSemaphoreCount;     // Semaphore container used to program
                                                    // core level semaphore.
  volatile UINT32          *PackageSemaphoreCount;  // Semaphore container used to program
                                                    // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
// Set by the BSP in InitializeCpuAfterRebase(); polled by APs in InitializeAp().
//
volatile BOOLEAN         mInitApsAfterSmmBaseReloc;
49\r
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;                           // Locks/semaphores for register programming
ACPI_CPU_DATA                mAcpiCpuData;                        // SMRAM copy of the CPU S3 data (see GetAcpiCpuData)
volatile UINT32              mNumberToFinish;                     // Count of APs still running a rendezvous phase
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;                      // BSP/AP exchange area inside the startup vector
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

//
// Safe hlt-loop code copied into ACPI NVS memory; APs are parked here at the
// end of InitializeAp(). The stub decrements the counter whose address is
// passed on the stack, then halts forever.
//
UINT8                        *mApHltLoopCode = NULL;
UINT8                        mApHltLoopCodeTemplate[] = {
                               0x8B, 0x44, 0x24, 0x04,  // mov  eax, dword ptr [esp+4]
                               0xF0, 0xFF, 0x08,        // lock dec  dword ptr [eax]
                               0xFA,                    // cli
                               0xF4,                    // hlt
                               0xEB, 0xFC               // jmp $-2
                               };
529a5a86
MK
92/**\r
93 Sync up the MTRR values for all processors.\r
94\r
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
96**/\r
97VOID\r
98EFIAPI\r
99LoadMtrrData (\r
100 EFI_PHYSICAL_ADDRESS MtrrTable\r
101 )\r
102/*++\r
103\r
104Routine Description:\r
105\r
106 Sync up the MTRR values for all processors.\r
107\r
108Arguments:\r
109\r
110Returns:\r
111 None\r
112\r
113--*/\r
114{\r
115 MTRR_SETTINGS *MtrrSettings;\r
116\r
117 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
118 MtrrSetAllMtrrs (MtrrSettings);\r
119}\r
120\r
/**
  Increment semaphore by 1.

  Thin MP-safe wrapper over InterlockedIncrement used by the register-table
  Semaphore dependency logic in ProgramProcessorRegister().

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  InterlockedIncrement (Sem);
}
134\r
/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  Spins (busy-waits) while the semaphore is zero; the compare-exchange retries
  if another processor changed the value between the read and the exchange.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
    // Retry while the semaphore is 0 (nothing to take) or another CPU
    // won the race and the exchange did not observe the value we read.
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}
161\r
ef21a304
ED
162/**\r
163 Read / write CR value.\r
164\r
165 @param[in] CrIndex The CR index which need to read/write.\r
166 @param[in] Read Read or write. TRUE is read.\r
167 @param[in,out] CrValue CR value.\r
168\r
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.\r
170**/\r
171UINTN\r
172ReadWriteCr (\r
173 IN UINT32 CrIndex,\r
174 IN BOOLEAN Read,\r
175 IN OUT UINTN *CrValue\r
176 )\r
177{\r
178 switch (CrIndex) {\r
179 case 0:\r
180 if (Read) {\r
181 *CrValue = AsmReadCr0 ();\r
182 } else {\r
183 AsmWriteCr0 (*CrValue);\r
184 }\r
185 break;\r
186 case 2:\r
187 if (Read) {\r
188 *CrValue = AsmReadCr2 ();\r
189 } else {\r
190 AsmWriteCr2 (*CrValue);\r
191 }\r
192 break;\r
193 case 3:\r
194 if (Read) {\r
195 *CrValue = AsmReadCr3 ();\r
196 } else {\r
197 AsmWriteCr3 (*CrValue);\r
198 }\r
199 break;\r
200 case 4:\r
201 if (Read) {\r
202 *CrValue = AsmReadCr4 ();\r
203 } else {\r
204 AsmWriteCr4 (*CrValue);\r
205 }\r
206 break;\r
207 default:\r
208 return EFI_UNSUPPORTED;;\r
209 }\r
210\r
211 return EFI_SUCCESS;\r
212}\r
213\r
93324390
ED
/**
  Initialize the CPU registers from a register table.

  Walks the table once and applies each entry according to its type:
  control registers, MSRs, memory-mapped writes (under a spinlock),
  cache enable/disable, and cross-processor Semaphore barriers.

  @param[in]  RegisterTable         The register table for this AP.
  @param[in]  ApLocation            AP location info for this ap.
  @param[in]  CpuStatus             CPU status info for this CPU.
  @param[in]  CpuFlags              Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;
  EFI_STATUS                Status;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      //
      // Read-modify-write only the requested bit field; entries with an
      // unsupported CR index are silently skipped.
      //
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        break;
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // MMIO writes are serialized across processors with a spinlock;
      // the 64-bit MMIO address is split across Index (low) and HighIndex.
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If driver maintail clearly
        // cores number in different packages, the logic will be much complicated.
        // Here driver just simply records the max core number in all packages and use it as expect
        // core number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
425\r
93324390
ED
426/**\r
427\r
428 Set Processor register for one AP.\r
e07e3ceb 429\r
93324390
ED
430 @param PreSmmRegisterTable Use pre Smm register table or register table.\r
431\r
432**/\r
433VOID\r
434SetRegister (\r
435 IN BOOLEAN PreSmmRegisterTable\r
436 )\r
437{\r
438 CPU_REGISTER_TABLE *RegisterTable;\r
439 CPU_REGISTER_TABLE *RegisterTables;\r
440 UINT32 InitApicId;\r
441 UINTN ProcIndex;\r
442 UINTN Index;\r
443\r
444 if (PreSmmRegisterTable) {\r
445 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;\r
446 } else {\r
447 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;\r
448 }\r
449\r
450 InitApicId = GetInitialApicId ();\r
451 RegisterTable = NULL;\r
7db4034f 452 ProcIndex = (UINTN)-1;\r
93324390
ED
453 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
454 if (RegisterTables[Index].InitialApicId == InitApicId) {\r
455 RegisterTable = &RegisterTables[Index];\r
456 ProcIndex = Index;\r
457 break;\r
458 }\r
459 }\r
460 ASSERT (RegisterTable != NULL);\r
461\r
462 if (mAcpiCpuData.ApLocation != 0) {\r
463 ProgramProcessorRegister (\r
464 RegisterTable,\r
465 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,\r
466 &mAcpiCpuData.CpuStatus,\r
467 &mCpuFlags\r
468 );\r
469 } else {\r
470 ProgramProcessorRegister (\r
471 RegisterTable,\r
472 NULL,\r
473 &mAcpiCpuData.CpuStatus,\r
474 &mCpuFlags\r
475 );\r
476 }\r
477}\r
478\r
/**
  AP initialization before and then after SMBASE relocation in the S3 boot path.

  Runs on each AP (entered via the startup vector): restore MTRRs and
  pre-SMM registers, signal arrival, wait for the BSP to finish SMBASE
  relocation, restore the remaining registers, then park in the hlt loop.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN                      TopOfStack;
  UINT8                      Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  // The counter address is passed on the (aligned) temporary stack; the safe
  // code itself decrements it after the AP has left this function's stack.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
519\r
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  Copies the assembly rendezvous code to WorkingBuffer, patches its mode
  switch jump targets for the relocated address, and fills in the
  MP_CPU_EXCHANGE_INFO area that immediately follows the code.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  // The +3 / +2 offsets skip the jump opcodes to land on their 32-bit
  // operands — assumed to match the assembly encoding; confirm against
  // MpFuncs.nasm if the rendezvous code changes.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
569\r
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // All processors except the BSP must check in via mNumberToFinish.
  //
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has finished its pre-relocation phase.
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
607\r
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // Wait until all APs have entered the safe hlt loop, which decrements
  // mNumberToFinish.
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
0bdc9e75
SZ
642\r
643/**\r
644 Restore SMM Configuration in S3 boot path.\r
645\r
646**/\r
647VOID\r
648RestoreSmmConfigurationInS3 (\r
649 VOID\r
650 )\r
651{\r
b10d5ddc
SZ
652 if (!mAcpiS3Enable) {\r
653 return;\r
654 }\r
655\r
0bdc9e75
SZ
656 //\r
657 // Restore SMM Configuration in S3 boot path.\r
658 //\r
659 if (mRestoreSmmConfigurationInS3) {\r
660 //\r
661 // Need make sure gSmst is correct because below function may use them.\r
662 //\r
663 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
664 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
665 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
666 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
667 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
668\r
669 //\r
670 // Configure SMM Code Access Check feature if available.\r
671 //\r
672 ConfigSmmCodeAccessCheck ();\r
673\r
674 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
675\r
676 mRestoreSmmConfigurationInS3 = FALSE;\r
677 }\r
678}\r
679\r
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Does not return: control is transferred back to the PEI S3 resume entry
  point via SwitchStack() (32-bit SMM) or AsmDisablePaging64() (64-bit SMM).
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE           *SmmS3ResumeState;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR      IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                    Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
816\r
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the structure in SMRAM via the gEfiAcpiVariableGuid HOB, fills in
  the resume entry point / stack / control registers, and allocates the
  ACPI NVS buffer for the AP hlt-loop code.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                       *GuidHob;
  EFI_SMRAM_DESCRIPTOR       *SmramDescriptor;
  SMM_S3_RESUME_STATE        *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS       Address;
  EFI_STATUS                 Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB there is no S3 resume context at all; halt rather
    // than continue with an unusable configuration.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
898\r
899/**\r
900 Copy register table from ACPI NVS memory into SMRAM.\r
901\r
902 @param[in] DestinationRegisterTableList Points to destination register table.\r
903 @param[in] SourceRegisterTableList Points to source register table.\r
904 @param[in] NumberOfCpus Number of CPUs.\r
905\r
906**/\r
907VOID\r
908CopyRegisterTable (\r
909 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
910 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
911 IN UINT32 NumberOfCpus\r
912 )\r
913{\r
914 UINTN Index;\r
0bdc9e75
SZ
915 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
916\r
917 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
918 for (Index = 0; Index < NumberOfCpus; Index++) {\r
30d995ee
JF
919 if (DestinationRegisterTableList[Index].AllocatedSize != 0) {\r
920 RegisterTableEntry = AllocateCopyPool (\r
921 DestinationRegisterTableList[Index].AllocatedSize,\r
922 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
923 );\r
924 ASSERT (RegisterTableEntry != NULL);\r
925 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
0bdc9e75
SZ
926 }\r
927 }\r
928}\r
929\r
/**
  Get ACPI CPU data.

  Copies the ACPI_CPU_DATA published via PcdCpuS3DataAddress — together with
  everything it points at (MTRR table, GDTR/IDTR, register tables, AP GDT/IDT,
  machine-check handler, CPU status, AP locations) — out of ACPI NVS memory
  into SMRAM, so S3 resume inside SMM never dereferences NVS memory.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA              *AcpiCpuData;
  IA32_DESCRIPTOR            *Gdtr;
  IA32_DESCRIPTOR            *Idtr;
  VOID                       *GdtForAp;
  VOID                       *IdtForAp;
  VOID                       *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION     *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  // One pool allocation holds all three, laid out back to back.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    //
    // One semaphore slot per logical processor (package * core * thread),
    // for both core-level and package-level barriers.
    //
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
}
b10d5ddc
SZ
1051\r
/**
  Get ACPI S3 enable flag.

  Caches PcdAcpiS3Enable into mAcpiS3Enable; the S3 helpers in this file
  early-return when the flag is FALSE.
**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}