/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#pragma pack(1)
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
  UINTN             InitializeFloatingPointUnitsAddress;
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
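
//
// MP_CPU_EXCHANGE_INFO is byte-packed because the AP rendezvous assembly is
// assumed to read these fields at fixed byte offsets while still in 16-bit
// and 32-bit mode; any compiler-inserted padding would break that layout.
//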

typedef struct {
  UINT8    *RendezvousFunnelAddress;
  UINTN    PModeEntryOffset;
  UINTN    FlatJumpOffset;
  UINTN    Size;
  UINTN    LModeEntryOffset;
  UINTN    LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Flags used when programming the register.
//
typedef struct {
  volatile UINTN    MemoryMappedLock;         // Spinlock used to program MMIO
  volatile UINT32   *CoreSemaphoreCount;      // Semaphore containers used to program
                                              // core-level semaphores.
  volatile UINT32   *PackageSemaphoreCount;   // Semaphore containers used to program
                                              // package-level semaphores.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
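
//
// The AP startup vector must reside below 1 MB (4 KB aligned) so that APs can
// execute it in real mode after the INIT-SIPI-SIPI sequence; the region chosen
// here sits just below the 0xA0000 legacy VGA range.
//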

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN                      mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE          *mSmmS3ResumeState = NULL;

BOOLEAN                      mAcpiS3Enable = TRUE;

UINT8                        *mApHltLoopCode = NULL;
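//
// Position-independent halt loop copied below 4 GB and executed by each AP at
// the end of the S3 path. Assumed calling contract (see TransferApToSafeState()):
// the address of mNumberToFinish is passed as the first stack argument; the AP
// atomically decrements it and then halts with interrupts masked.
//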
UINT8                        mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,   // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,         // lock dec  dword ptr [eax]
  0xFA,                     // cli
  0xF4,                     // hlt
  0xEB, 0xFC                // jmp $-2
};

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS  MtrrTable
  )
{
  MTRR_SETTINGS  *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Increment semaphore by 1.

  @param Sem  IN: 32-bit unsigned integer
**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN: 32-bit unsigned integer
**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}

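//
// S3ReleaseSemaphore()/S3WaitForSemaphore() are the V()/P() primitives used by
// the Semaphore entries in ProgramProcessorRegister() below. A minimal sketch
// of the barrier built from them, assuming N participating threads:
//
//   for (Index = 0; Index < N; Index++) {
//     S3ReleaseSemaphore (&Sem[FirstThread + Index]);   // V: signal every thread
//   }
//   for (Index = 0; Index < N; Index++) {
//     S3WaitForSemaphore (&Sem[CurrentThread]);         // P: absorb N signals
//   }
//
// A thread leaves the barrier only after all N threads have signaled it.
//
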
/**
  Read / write CR value.

  @param[in]      CrIndex  The CR index to read/write.
  @param[in]      Read     Read or write. TRUE is read.
  @param[in,out]  CrValue  CR value.

  @retval EFI_SUCCESS      Read/write the CR value successfully.
  @retval EFI_UNSUPPORTED  The CR index is not supported.
**/
EFI_STATUS
ReadWriteCr (
  IN     UINT32   CrIndex,
  IN     BOOLEAN  Read,
  IN OUT UINTN    *CrValue
  )
{
  switch (CrIndex) {
  case 0:
    if (Read) {
      *CrValue = AsmReadCr0 ();
    } else {
      AsmWriteCr0 (*CrValue);
    }
    break;
  case 2:
    if (Read) {
      *CrValue = AsmReadCr2 ();
    } else {
      AsmWriteCr2 (*CrValue);
    }
    break;
  case 3:
    if (Read) {
      *CrValue = AsmReadCr3 ();
    } else {
      AsmWriteCr3 (*CrValue);
    }
    break;
  case 4:
    if (Read) {
      *CrValue = AsmReadCr4 ();
    } else {
      AsmWriteCr4 (*CrValue);
    }
    break;
  default:
    return EFI_UNSUPPORTED;
  }

  return EFI_SUCCESS;
}

/**
  Initialize the CPU registers from a register table.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this AP.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when programming the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
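      //
      // TestThenWrite is assumed to exist so that an MSR is only written when
      // its current value differs from the requested one, e.g. for MSRs that
      // become locked once written.
      //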
      if (RegisterTableEntry->TestThenWrite) {
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // The semaphore works like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) wait at the P() line and then continue running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread   = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First, notify all threads in the current core that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current core are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may contain different numbers of valid cores. If the driver
        // tracked the exact core count of every package, the logic would be much more
        // complicated, so it simply records the maximum core count across all packages and
        // uses it as the expected core count for every package.
        // In the two steps below, the current thread first releases the semaphore of every
        // thread in the current package; some of those threads may not be valid in this
        // package, but the driver doesn't care. Second, the current thread waits on its own
        // semaphore for all valid threads in the current package. Because only the valid
        // threads release this thread's semaphore, waiting for the valid thread count is
        // sufficient.
        //

        //
        // First, notify all threads in the current package that this thread is ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in the current package are ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}

/**

  Set processor register for one AP.

  @param  PreSmmRegisterTable  Use pre-SMM register table or register table.

**/
VOID
SetRegister (
  IN BOOLEAN  PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE  *RegisterTable;
  CPU_REGISTER_TABLE  *RegisterTables;
  UINT32              InitApicId;
  UINTN               ProcIndex;
  UINTN               Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  ProcIndex = (UINTN)-1;
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}

/**
  AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place the AP into the safe code; count down the number with the lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN) Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
  CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
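  // The fixed byte offsets used in the patches below (FlatJumpOffset + 3,
  // LongJumpOffset + 2) are assumed to point at the 32-bit target operand
  // inside the far-jump instructions emitted by the AP rendezvous assembly.
  //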
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It does the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute the code path for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

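//
// Sketch of the S3 MP bring-up handshake implemented above and below, assuming
// one BSP and (NumberOfCpus - 1) APs:
//
//   BSP: InitializeCpuBeforeRebase()        APs: InitializeAp()
//     mNumberToFinish = N - 1
//     send INIT-SIPI-SIPI  ---------------->  restore MTRRs / pre-SMM registers
//     wait mNumberToFinish == 0  <----------  InterlockedDecrement ()
//     ...SMM base relocation runs...          spin on mInitApsAfterSmmBaseReloc
//   BSP: InitializeCpuAfterRebase()
//     mNumberToFinish = N - 1
//     mInitApsAfterSmmBaseReloc = TRUE  --->  restore remaining registers
//     wait mNumberToFinish == 0  <----------  decrement in the safe hlt-loop code
//
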
/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // The register setting must begin only after all APs have continued their
  // initialization. This is a requirement of the semaphore mechanism in the
  // register table: if a semaphore's dependency type is package type, the
  // semaphore waits for all APs in one package to finish their tasks before
  // setting the next register for all APs. If the APs have not started their
  // tasks while the BSP performs its own, the BSP thread will hang waiting
  // for the other APs in the same package to finish.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  Restore SMM Configuration in S3 boot path.

**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Make sure gSmst is correct because the functions below may use it.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    mRestoreSmmConfigurationInS3 = FALSE;
  }
}

/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since the new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Cannot resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}

/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

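    //
    // Record whether SMM runs in 64-bit or 32-bit mode; SmmRestoreCpu() uses
    // this signature to select the matching return path to the PEI phase
    // (AsmDisablePaging64() versus SwitchStack()).
    //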
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}

/**
  Copy register table from ACPI NVS memory into SMRAM.

  @param[in] DestinationRegisterTableList  Points to destination register table.
  @param[in] SourceRegisterTableList       Points to source register table.
  @param[in] NumberOfCpus                  Number of CPUs.

**/
VOID
CopyRegisterTable (
  IN CPU_REGISTER_TABLE  *DestinationRegisterTableList,
  IN CPU_REGISTER_TABLE  *SourceRegisterTableList,
  IN UINT32              NumberOfCpus
  )
{
  UINTN                     Index;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  for (Index = 0; Index < NumberOfCpus; Index++) {
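    //
    // Deep-copy each CPU's entry buffer so the table no longer references
    // the original entries in ACPI NVS memory.
    //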
    if (DestinationRegisterTableList[Index].AllocatedSize != 0) {
      RegisterTableEntry = AllocateCopyPool (
                             DestinationRegisterTableList[Index].AllocatedSize,
                             (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
                             );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
    }
  }
}

/**
  Get ACPI CPU data.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
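  // The copy is assumed to guard against later modification of the ACPI NVS
  // copy: everything dereferenced on the resume path then lives in SMRAM.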
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
  ASSERT (mAcpiCpuData.RegisterTable != 0);

  CopyRegisterTable (
    (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
    (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
    mAcpiCpuData.NumberOfCpus
    );

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
  }
  InitializeSpinLock ((SPIN_LOCK *) &mCpuFlags.MemoryMappedLock);
}

/**
  Get ACPI S3 enable flag.

**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}