]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
NetworkPkg: Add NETWORK_HTTP_ENABLE macro
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
CommitLineData
529a5a86
MK
1/** @file\r
2Code for Processor S3 restoration\r
3\r
02031cfc 4Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>\r
0acd8697 5SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
6\r
7**/\r
8\r
9#include "PiSmmCpuDxeSmm.h"\r
10\r
//
// Data exchanged between the BSP and the APs during S3 AP startup.
// PrepareApStartupVector() places an instance of this structure directly
// after the AP rendezvous code in low memory, so its layout is consumed
// by the AP startup assembly — pragma pack(1) keeps the field offsets
// byte-exact with no compiler-inserted padding.
//
#pragma pack(1)
typedef struct {
  UINTN            Lock;                                // Spin lock/ownership word for AP serialization
  VOID             *StackStart;                         // Base of the stack area carved up by APs
  UINTN            StackSize;                           // Per-AP stack size
  VOID             *ApFunction;                         // C function each AP jumps to (e.g. InitializeAp)
  IA32_DESCRIPTOR  GdtrProfile;                         // GDTR the APs load during mode switch
  IA32_DESCRIPTOR  IdtrProfile;                         // IDTR the APs load during mode switch
  UINT32           BufferStart;                         // Physical address of the startup vector buffer
  UINT32           Cr3;                                 // Page-table root for entering paged/long mode
  UINTN            InitializeFloatingPointUnitsAddress; // Address of InitializeFloatingPointUnits for APs
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
529a5a86
MK
24\r
//
// Address/offset map of the AP rendezvous code, filled in by
// AsmGetAddressMap(). The *Offset fields locate the jump targets that
// PrepareApStartupVector() patches after copying the code to low memory.
//
typedef struct {
  UINT8  *RendezvousFunnelAddress; // Start of the AP startup (rendezvous) code to copy
  UINTN  PModeEntryOffset;         // Offset of the protected-mode entry point
  UINTN  FlatJumpOffset;           // Offset of the far jump into flat protected mode (patched)
  UINTN  Size;                     // Total size in bytes of the rendezvous code
  UINTN  LModeEntryOffset;         // Offset of the long-mode entry point
  UINTN  LongJumpOffset;           // Offset of the far jump into long mode (patched; 0 if absent)
} MP_ASSEMBLY_ADDRESS_MAP;
33\r
//
// Flags used when program the register.
// Shared synchronization state used by ProgramProcessorRegister() when
// all processors run register tables concurrently.
//
typedef struct {
  volatile UINTN   MemoryMappedLock;      // Spinlock used to program mmio
  volatile UINT32  *CoreSemaphoreCount;   // Semaphore container used to program
                                          // core level semaphore.
  volatile UINT32  *PackageSemaphoreCount; // Semaphore container used to program
                                           // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
6c4c15fa 44\r
7677b4db
ED
//
// Signal that SMM BASE relocation is complete.
// Written only by the BSP (FALSE in InitializeCpuBeforeRebase, TRUE in
// InitializeCpuAfterRebase); APs spin on it in InitializeAp.
//
volatile BOOLEAN mInitApsAfterSmmBaseReloc;
49\r
529a5a86
MK
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

//
// 8 KB region ending at 0xA0000 (top of conventional memory).
// NOTE(review): not referenced in this view — presumably reserved for
// real-mode AP startup code; confirm against the rest of the driver.
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
529a5a86 64\r
PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;                            // Shared locks/semaphores for register programming
ACPI_CPU_DATA              mAcpiCpuData;                         // SMRAM copy of the CPU S3 data (see GetAcpiCpuData)
volatile UINT32            mNumberToFinish;                      // Count of APs still working; BSP spins until 0
MP_CPU_EXCHANGE_INFO       *mExchangeInfo;                       // BSP<->AP exchange area inside the startup vector
BOOLEAN                    mRestoreSmmConfigurationInS3 = FALSE; // Set by SmmRestoreCpu; consumed on next SMI

//
// S3 boot flag
//
BOOLEAN                    mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE        *mSmmS3ResumeState = NULL;

BOOLEAN                    mAcpiS3Enable = TRUE;                 // When FALSE, all S3 paths in this file are no-ops

//
// Safe hlt-loop stub copied into ACPI NVS; APs park here at the end of
// InitializeAp. The stub decrements *(UINT32 *)arg (mNumberToFinish)
// and then halts forever. IA32 machine code:
//
UINT8                      *mApHltLoopCode = NULL;
UINT8                      mApHltLoopCodeTemplate[] = {
                             0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
                             0xF0, 0xFF, 0x08,       // lock dec dword ptr [eax]
                             0xFA,                   // cli
                             0xF4,                   // hlt
                             0xEB, 0xFC              // jmp $-2
                             };
91\r
529a5a86
MK
92/**\r
93 Sync up the MTRR values for all processors.\r
94\r
95 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r
96**/\r
97VOID\r
98EFIAPI\r
99LoadMtrrData (\r
100 EFI_PHYSICAL_ADDRESS MtrrTable\r
101 )\r
102/*++\r
103\r
104Routine Description:\r
105\r
106 Sync up the MTRR values for all processors.\r
107\r
108Arguments:\r
109\r
110Returns:\r
111 None\r
112\r
113--*/\r
114{\r
115 MTRR_SETTINGS *MtrrSettings;\r
116\r
117 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;\r
118 MtrrSetAllMtrrs (MtrrSettings);\r
119}\r
120\r
/**
  Increment semaphore by 1.

  The increment is performed atomically so it is safe to call from
  multiple processors concurrently (V operation).

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}
134\r
/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  Spins (P operation): re-reads the counter until a non-zero value is
  observed AND this processor wins the compare-exchange that decrements
  it. A failed CAS simply retries with the freshly observed value.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
    // Retry while the semaphore is empty, or another processor raced us
    // and changed *Sem between the read and the compare-exchange.
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}
161\r
ef21a304
ED
162/**\r
163 Read / write CR value.\r
164\r
165 @param[in] CrIndex The CR index which need to read/write.\r
166 @param[in] Read Read or write. TRUE is read.\r
167 @param[in,out] CrValue CR value.\r
168\r
169 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.\r
170**/\r
171UINTN\r
172ReadWriteCr (\r
173 IN UINT32 CrIndex,\r
174 IN BOOLEAN Read,\r
175 IN OUT UINTN *CrValue\r
176 )\r
177{\r
178 switch (CrIndex) {\r
179 case 0:\r
180 if (Read) {\r
181 *CrValue = AsmReadCr0 ();\r
182 } else {\r
183 AsmWriteCr0 (*CrValue);\r
184 }\r
185 break;\r
186 case 2:\r
187 if (Read) {\r
188 *CrValue = AsmReadCr2 ();\r
189 } else {\r
190 AsmWriteCr2 (*CrValue);\r
191 }\r
192 break;\r
193 case 3:\r
194 if (Read) {\r
195 *CrValue = AsmReadCr3 ();\r
196 } else {\r
197 AsmWriteCr3 (*CrValue);\r
198 }\r
199 break;\r
200 case 4:\r
201 if (Read) {\r
202 *CrValue = AsmReadCr4 ();\r
203 } else {\r
204 AsmWriteCr4 (*CrValue);\r
205 }\r
206 break;\r
207 default:\r
208 return EFI_UNSUPPORTED;;\r
209 }\r
210\r
211 return EFI_SUCCESS;\r
212}\r
213\r
93324390
ED
/**
  Initialize the CPU registers from a register table.

  Walks every entry of this processor's register table and applies it:
  control registers, MSRs, memory-mapped registers, cache enable/disable,
  and Semaphore entries that act as cross-processor barriers so entries
  after the barrier are not executed until sibling threads catch up.

  @param[in] RegisterTable  The register table for this AP.
  @param[in] ApLocation     AP location info for this ap.
  @param[in] CpuStatus      CPU status info for this CPU.
  @param[in] CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
      if (EFI_ERROR (Status)) {
        //
        // Unsupported CR index: skip this entry.
        //
        break;
      }
      if (RegisterTableEntry->TestThenWrite) {
        //
        // TestThenWrite: skip the write when the bit field already
        // holds the desired value (avoids redundant CR writes).
        //
        CurrentValue = BitFieldRead64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                         );
        if (CurrentValue == RegisterTableEntry->Value) {
          break;
        }
      }
      Value = (UINTN) BitFieldWrite64 (
                        Value,
                        RegisterTableEntry->ValidBitStart,
                        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                        RegisterTableEntry->Value
                        );
      ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->TestThenWrite) {
        //
        // TestThenWrite: read the MSR first and skip the write when the
        // relevant bits already match (some MSRs fault or have side
        // effects on write).
        //
        Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
        if (RegisterTableEntry->ValidBitLength >= 64) {
          if (Value == RegisterTableEntry->Value) {
            break;
          }
        } else {
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }
      }

      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      //
      // Serialize MMIO programming across processors with a spinlock;
      // Index holds the low 32 bits of the address, HighIndex the high 32.
      //
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache. Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      //  All threads (T0...Tn) waits in P() line and continues running
      //  together.
      //
      //
      //  T0             T1            ...           Tn
      //
      //  V(0...n)       V(0...n)      ...           V(0...n)
      //  n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->CoreSemaphoreCount != NULL) &&
        (CpuFlags->PackageSemaphoreCount != NULL)
        );
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        SemaphorePtr = CpuFlags->CoreSemaphoreCount;
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        SemaphorePtr = CpuFlags->PackageSemaphoreCount;
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If the driver maintained
        // exact core counts per package, the logic would be much more complicated.
        // Here the driver simply records the max core number across all packages and uses it
        // as the expected core number for every package.
        // In the two steps below, the current thread first releases the semaphore for each
        // thread slot in its package. Some slots may not be valid in this package, but the
        // driver doesn't care. Second, the current thread waits on its own semaphore once per
        // VALID thread, because only valid threads perform releases, so waiting for more
        // would deadlock.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}
454\r
93324390
ED
455/**\r
456\r
457 Set Processor register for one AP.\r
e07e3ceb 458\r
93324390
ED
459 @param PreSmmRegisterTable Use pre Smm register table or register table.\r
460\r
461**/\r
462VOID\r
463SetRegister (\r
464 IN BOOLEAN PreSmmRegisterTable\r
465 )\r
466{\r
467 CPU_REGISTER_TABLE *RegisterTable;\r
468 CPU_REGISTER_TABLE *RegisterTables;\r
469 UINT32 InitApicId;\r
470 UINTN ProcIndex;\r
471 UINTN Index;\r
472\r
473 if (PreSmmRegisterTable) {\r
474 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;\r
475 } else {\r
476 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;\r
477 }\r
478\r
479 InitApicId = GetInitialApicId ();\r
480 RegisterTable = NULL;\r
7db4034f 481 ProcIndex = (UINTN)-1;\r
93324390
ED
482 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r
483 if (RegisterTables[Index].InitialApicId == InitApicId) {\r
484 RegisterTable = &RegisterTables[Index];\r
485 ProcIndex = Index;\r
486 break;\r
487 }\r
488 }\r
489 ASSERT (RegisterTable != NULL);\r
490\r
491 if (mAcpiCpuData.ApLocation != 0) {\r
492 ProgramProcessorRegister (\r
493 RegisterTable,\r
494 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,\r
495 &mAcpiCpuData.CpuStatus,\r
496 &mCpuFlags\r
497 );\r
498 } else {\r
499 ProgramProcessorRegister (\r
500 RegisterTable,\r
501 NULL,\r
502 &mAcpiCpuData.CpuStatus,\r
503 &mCpuFlags\r
504 );\r
505 }\r
506}\r
507\r
529a5a86 508/**\r
7677b4db 509 AP initialization before then after SMBASE relocation in the S3 boot path.\r
529a5a86
MK
510**/\r
511VOID\r
94744aa2 512InitializeAp (\r
529a5a86
MK
513 VOID\r
514 )\r
515{\r
7677b4db
ED
516 UINTN TopOfStack;\r
517 UINT8 Stack[128];\r
529a5a86
MK
518\r
519 LoadMtrrData (mAcpiCpuData.MtrrTable);\r
520\r
93324390 521 SetRegister (TRUE);\r
7677b4db 522\r
529a5a86
MK
523 //\r
524 // Count down the number with lock mechanism.\r
525 //\r
526 InterlockedDecrement (&mNumberToFinish);\r
529a5a86 527\r
7677b4db
ED
528 //\r
529 // Wait for BSP to signal SMM Base relocation done.\r
530 //\r
531 while (!mInitApsAfterSmmBaseReloc) {\r
532 CpuPause ();\r
533 }\r
529a5a86
MK
534\r
535 ProgramVirtualWireMode ();\r
536 DisableLvtInterrupts ();\r
537\r
93324390 538 SetRegister (FALSE);\r
529a5a86
MK
539\r
540 //\r
ec8a3877 541 // Place AP into the safe code, count down the number with lock mechanism in the safe code.\r
4a0f88dd 542 //\r
672b80c8
MK
543 TopOfStack = (UINTN) Stack + sizeof (Stack);\r
544 TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);\r
4a0f88dd 545 CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));\r
672b80c8 546 TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);\r
529a5a86
MK
547}\r
548\r
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.
  Copies the AP rendezvous code to WorkingBuffer, patches its mode-switch
  far jumps to point into the copy, and fills the MP_CPU_EXCHANGE_INFO
  area that immediately follows the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  // The +3 / +2 offsets skip the jump opcode bytes to reach the 32-bit
  // target operand being patched.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
598\r
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

  Wakes all APs into InitializeAp() and blocks until every AP has
  completed its pre-relocation phase.
**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  //
  // With hot-plug support the live CPU count may be below the saved count;
  // otherwise the two must match exactly.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has decremented the counter in InitializeAp().
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
641\r
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

  Releases the APs (which are spinning in InitializeAp()), runs the BSP's
  own post-relocation register table, then waits for the APs to park in
  the safe hlt loop.
**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }
  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // Wait until every AP has parked (the hlt-loop stub decrements the counter).
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
0bdc9e75
SZ
681\r
/**
  Restore SMM Configuration in S3 boot path.

  Invoked on the first SMI after SmmRestoreCpu() set
  mRestoreSmmConfigurationInS3; re-synchronizes gSmst with the SMM core
  context and re-applies the SMM code access check. No-op unless ACPI S3
  is enabled and the flag is set.
**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need make sure gSmst is correct because below function may use them.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // One-shot: clear the flag so later SMIs skip this work.
    //
    mRestoreSmmConfigurationInS3 = FALSE;
  }
}
718\r
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Entered from the S3 resume PEIM (via SmmS3ResumeEntryPoint). Restores
  CPU state before and after SMBASE relocation, then returns to the PEI
  phase either by SwitchStack (32-bit PEI) or AsmDisablePaging64 (when
  SMM runs in 64-bit mode and PEI is 32-bit). This function does not
  return normally.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    // NOTE(review): only the first 32 (architectural exception) gates are
    // initialized here even though IdtEntryTable has
    // EXCEPTION_VECTOR_NUMBER entries.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase ();
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase ();
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
855\r
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the SMRAM region published through the gEfiAcpiVariableGuid HOB,
  builds the SMM_S3_RESUME_STATE handoff there (entry point, stack,
  CR0/CR3/CR4, bitness signature), and allocates the below-4GB ACPI NVS
  buffer that holds the AP hlt-loop stub. Dead-loops if the HOB is absent.
  No-op when ACPI S3 support is disabled.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume-state structure lives at the start of the SMRAM region
    // described by the HOB.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    //
    // A failed stack allocation is recorded as size 0 rather than fatal.
    //
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Record the word size this SMM image was built for.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }
    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiACPIMemoryNVS,
                  EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                  &Address
                  );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *) (UINTN) Address;
}
937\r
938/**\r
939 Copy register table from ACPI NVS memory into SMRAM.\r
940\r
941 @param[in] DestinationRegisterTableList Points to destination register table.\r
942 @param[in] SourceRegisterTableList Points to source register table.\r
943 @param[in] NumberOfCpus Number of CPUs.\r
944\r
945**/\r
946VOID\r
947CopyRegisterTable (\r
948 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
949 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
950 IN UINT32 NumberOfCpus\r
951 )\r
952{\r
953 UINTN Index;\r
0bdc9e75
SZ
954 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
955\r
956 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
957 for (Index = 0; Index < NumberOfCpus; Index++) {\r
30d995ee
JF
958 if (DestinationRegisterTableList[Index].AllocatedSize != 0) {\r
959 RegisterTableEntry = AllocateCopyPool (\r
960 DestinationRegisterTableList[Index].AllocatedSize,\r
961 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r
962 );\r
963 ASSERT (RegisterTableEntry != NULL);\r
964 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r
0bdc9e75
SZ
965 }\r
966 }\r
967}\r
968\r
969/**\r
970 Get ACPI CPU data.\r
971\r
972**/\r
973VOID\r
974GetAcpiCpuData (\r
975 VOID\r
976 )\r
977{\r
978 ACPI_CPU_DATA *AcpiCpuData;\r
979 IA32_DESCRIPTOR *Gdtr;\r
980 IA32_DESCRIPTOR *Idtr;\r
293f8766
ED
981 VOID *GdtForAp;\r
982 VOID *IdtForAp;\r
983 VOID *MachineCheckHandlerForAp;\r
93324390 984 CPU_STATUS_INFORMATION *CpuStatus;\r
0bdc9e75 985\r
b10d5ddc
SZ
986 if (!mAcpiS3Enable) {\r
987 return;\r
988 }\r
989\r
0bdc9e75
SZ
990 //\r
991 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0\r
992 //\r
993 mAcpiCpuData.NumberOfCpus = 0;\r
994\r
995 //\r
996 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
997 //\r
998 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
999 if (AcpiCpuData == 0) {\r
1000 return;\r
1001 }\r
1002\r
1003 //\r
1004 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
1005 //\r
1006 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
1007\r
1008 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
1009 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
1010\r
1011 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
1012\r
1013 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
1014 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
1015\r
1016 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
1017\r
1018 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
1019 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
1020\r
1021 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
1022\r
1023 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
1024 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
1025\r
1026 CopyRegisterTable (\r
1027 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
1028 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
1029 mAcpiCpuData.NumberOfCpus\r
1030 );\r
1031\r
1032 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
1033 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
1034\r
1035 CopyRegisterTable (\r
1036 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
1037 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
1038 mAcpiCpuData.NumberOfCpus\r
1039 );\r
1040\r
1041 //\r
1042 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
1043 //\r
1044 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
1045 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
1046\r
293f8766
ED
1047 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
1048 ASSERT (GdtForAp != NULL);\r
1049 IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));\r
1050 MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));\r
1051\r
1052 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
1053 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
1054 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
0bdc9e75 1055\r
293f8766
ED
1056 Gdtr->Base = (UINTN)GdtForAp;\r
1057 Idtr->Base = (UINTN)IdtForAp;\r
1058 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;\r
93324390
ED
1059\r
1060 CpuStatus = &mAcpiCpuData.CpuStatus;\r
1061 CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));\r
1062 if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {\r
1063 CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
1064 sizeof (UINT32) * CpuStatus->PackageCount,\r
1065 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage\r
1066 );\r
1067 ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);\r
1068 }\r
1069 if (AcpiCpuData->ApLocation != 0) {\r
1070 mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (\r
1071 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),\r
1072 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation\r
1073 );\r
1074 ASSERT (mAcpiCpuData.ApLocation != 0);\r
1075 }\r
1076 if (CpuStatus->PackageCount != 0) {\r
9bae7811
ED
1077 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (\r
1078 sizeof (UINT32) * CpuStatus->PackageCount *\r
1079 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount\r
1080 );\r
1081 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);\r
1082 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (\r
1083 sizeof (UINT32) * CpuStatus->PackageCount *\r
1084 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount\r
1085 );\r
1086 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);\r
93324390
ED
1087 }\r
1088 InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);\r
0bdc9e75 1089}\r
b10d5ddc
SZ
1090\r
1091/**\r
1092 Get ACPI S3 enable flag.\r
1093\r
1094**/\r
1095VOID\r
1096GetAcpiS3EnableFlag (\r
1097 VOID\r
1098 )\r
1099{\r
1100 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);\r
1101}\r