]>
Commit | Line | Data |
---|---|---|
529a5a86 MK |
1 | /** @file\r |
2 | Code for Processor S3 restoration\r | |
3 | \r | |
e992cc3f | 4 | Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>\r |
0acd8697 | 5 | SPDX-License-Identifier: BSD-2-Clause-Patent\r |
529a5a86 MK |
6 | \r |
7 | **/\r | |
8 | \r | |
9 | #include "PiSmmCpuDxeSmm.h"\r | |
10 | \r | |
//
// Data exchanged between the BSP and the APs during S3 resume.  This block is
// placed immediately after the relocated AP startup code (see
// PrepareApStartupVector()), and is consumed by that assembly code, so it is
// byte-packed -- do not change the layout without updating the assembly.
//
#pragma pack(1)
typedef struct {
  UINTN              Lock;                                  // spinlock guarding the exchange area
  VOID               *StackStart;                           // base of the AP stack pool (from mAcpiCpuData.StackAddress)
  UINTN              StackSize;                             // per-AP stack size
  VOID               *ApFunction;                           // C function the APs jump to (set to InitializeAp)
  IA32_DESCRIPTOR    GdtrProfile;                           // GDTR the APs load
  IA32_DESCRIPTOR    IdtrProfile;                           // IDTR the APs load
  UINT32             BufferStart;                           // physical address of the startup vector
  UINT32             Cr3;                                   // page-table root captured from the BSP's CR3
  UINTN              InitializeFloatingPointUnitsAddress;   // address of InitializeFloatingPointUnits()
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
529a5a86 MK |
24 | \r |
//
// Offsets into the AP rendezvous (startup) assembly code, filled in by
// AsmGetAddressMap().  PrepareApStartupVector() uses them to copy the code
// below 1 MB and patch its mode-switch jump targets.
//
typedef struct {
  UINT8    *RendezvousFunnelAddress;   // start of the AP startup code to relocate
  UINTN    PModeEntryOffset;           // offset of the 32-bit protected-mode entry point
  UINTN    FlatJumpOffset;             // offset of the jump that is patched to PModeEntry
  UINTN    Size;                       // total size, in bytes, of the startup code
  UINTN    LModeEntryOffset;           // offset of the long-mode entry point
  UINTN    LongJumpOffset;             // offset of the jump patched to LModeEntry (0 when absent)
} MP_ASSEMBLY_ADDRESS_MAP;
33 | \r | |
//
// Flags used when programming the register tables.  Shared by the BSP and all
// APs while ProgramProcessorRegister() runs.
//
typedef struct {
  volatile UINTN     MemoryMappedLock;         // Spinlock used to program mmio
  volatile UINT32    *CoreSemaphoreCount;      // Semaphore container used to program
                                               // core level semaphore.
  volatile UINT32    *PackageSemaphoreCount;   // Semaphore container used to program
                                               // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.  Cleared by the BSP in
// InitializeCpuBeforeRebase() and set in InitializeCpuAfterRebase(); APs
// spin on it in InitializeAp().
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;
7677b4db | 49 | \r |
529a5a86 MK |
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );
61 | \r | |
053e878b MK |
//
// An 8 KB region just below the 0xA0000 legacy VGA hole.
// NOTE(review): presumably reserved for the real-mode AP startup vector --
// no consumer is visible in this chunk; confirm against the rest of the file.
//
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;             // shared flags for register-table programming
ACPI_CPU_DATA               mAcpiCpuData;          // CPU configuration saved by the normal boot path
volatile UINT32             mNumberToFinish;       // number of APs that have not yet checked in
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;        // BSP<->AP mailbox, placed after the startup code
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;

//
// 32-bit machine code the APs are parked in after S3 initialization:
// atomically decrement the counter whose address is at [esp+4], then
// cli/hlt forever.  Copied to mApHltLoopCode before use.
//
UINT8  *mApHltLoopCode          = NULL;
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04,             // mov  eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,                   // lock dec  dword ptr [eax]
  0xFA,                               // cli
  0xF4,                               // hlt
  0xEB, 0xFC                          // jmp $-2
};
4a0f88dd | 91 | \r |
529a5a86 MK |
92 | /**\r |
93 | Sync up the MTRR values for all processors.\r | |
94 | \r | |
95 | @param MtrrTable Table holding fixed/variable MTRR values to be loaded.\r | |
96 | **/\r | |
97 | VOID\r | |
98 | EFIAPI\r | |
99 | LoadMtrrData (\r | |
053e878b | 100 | EFI_PHYSICAL_ADDRESS MtrrTable\r |
529a5a86 | 101 | )\r |
053e878b | 102 | \r |
529a5a86 MK |
103 | /*++\r |
104 | \r | |
105 | Routine Description:\r | |
106 | \r | |
107 | Sync up the MTRR values for all processors.\r | |
108 | \r | |
109 | Arguments:\r | |
110 | \r | |
111 | Returns:\r | |
112 | None\r | |
113 | \r | |
114 | --*/\r | |
115 | {\r | |
053e878b | 116 | MTRR_SETTINGS *MtrrSettings;\r |
529a5a86 | 117 | \r |
053e878b | 118 | MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;\r |
529a5a86 MK |
119 | MtrrSetAllMtrrs (MtrrSettings);\r |
120 | }\r | |
121 | \r | |
/**
  Increment semaphore by 1.

  Thin wrapper over InterlockedIncrement() so the semaphore release used by
  the register-table rendezvous reads symmetrically with S3WaitForSemaphore().
  Safe to call concurrently from the BSP and APs.

  @param Sem  IN: 32-bit unsigned integer

**/
VOID
S3ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  InterlockedIncrement (Sem);
}
135 | \r | |
136 | /**\r | |
137 | Decrement the semaphore by 1 if it is not zero.\r | |
529a5a86 | 138 | \r |
93324390 ED |
139 | Performs an atomic decrement operation for semaphore.\r |
140 | The compare exchange operation must be performed using\r | |
141 | MP safe mechanisms.\r | |
142 | \r | |
143 | @param Sem IN: 32-bit unsigned integer\r | |
144 | \r | |
145 | **/\r | |
146 | VOID\r | |
147 | S3WaitForSemaphore (\r | |
053e878b | 148 | IN OUT volatile UINT32 *Sem\r |
93324390 ED |
149 | )\r |
150 | {\r | |
151 | UINT32 Value;\r | |
152 | \r | |
153 | do {\r | |
154 | Value = *Sem;\r | |
155 | } while (Value == 0 ||\r | |
156 | InterlockedCompareExchange32 (\r | |
157 | Sem,\r | |
158 | Value,\r | |
159 | Value - 1\r | |
160 | ) != Value);\r | |
161 | }\r | |
162 | \r | |
ef21a304 ED |
163 | /**\r |
164 | Read / write CR value.\r | |
165 | \r | |
166 | @param[in] CrIndex The CR index which need to read/write.\r | |
167 | @param[in] Read Read or write. TRUE is read.\r | |
168 | @param[in,out] CrValue CR value.\r | |
169 | \r | |
170 | @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.\r | |
171 | **/\r | |
172 | UINTN\r | |
173 | ReadWriteCr (\r | |
053e878b MK |
174 | IN UINT32 CrIndex,\r |
175 | IN BOOLEAN Read,\r | |
176 | IN OUT UINTN *CrValue\r | |
ef21a304 ED |
177 | )\r |
178 | {\r | |
179 | switch (CrIndex) {\r | |
053e878b MK |
180 | case 0:\r |
181 | if (Read) {\r | |
182 | *CrValue = AsmReadCr0 ();\r | |
183 | } else {\r | |
184 | AsmWriteCr0 (*CrValue);\r | |
185 | }\r | |
186 | \r | |
187 | break;\r | |
188 | case 2:\r | |
189 | if (Read) {\r | |
190 | *CrValue = AsmReadCr2 ();\r | |
191 | } else {\r | |
192 | AsmWriteCr2 (*CrValue);\r | |
193 | }\r | |
194 | \r | |
195 | break;\r | |
196 | case 3:\r | |
197 | if (Read) {\r | |
198 | *CrValue = AsmReadCr3 ();\r | |
199 | } else {\r | |
200 | AsmWriteCr3 (*CrValue);\r | |
201 | }\r | |
202 | \r | |
203 | break;\r | |
204 | case 4:\r | |
205 | if (Read) {\r | |
206 | *CrValue = AsmReadCr4 ();\r | |
207 | } else {\r | |
208 | AsmWriteCr4 (*CrValue);\r | |
209 | }\r | |
210 | \r | |
211 | break;\r | |
212 | default:\r | |
213 | return EFI_UNSUPPORTED;\r | |
ef21a304 ED |
214 | }\r |
215 | \r | |
216 | return EFI_SUCCESS;\r | |
217 | }\r | |
218 | \r | |
93324390 ED |
/**
  Initialize the CPU registers from a register table.

  Walks every entry of RegisterTable and applies it according to its type:
  control registers, MSRs (optionally test-then-write), MMIO bit fields
  (serialized by a spinlock), cache enable/disable, and Semaphore entries
  that rendezvous all threads of a core or package.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {
    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
        if (EFI_ERROR (Status)) {
          //
          // Unsupported CR index: skip this entry.
          //
          break;
        }

        if (RegisterTableEntry->TestThenWrite) {
          //
          // Skip the write when the target bit field already holds the
          // desired value.
          //
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }

        Value = (UINTN)BitFieldWrite64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                         RegisterTableEntry->Value
                         );
        ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->TestThenWrite) {
          Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
          if (RegisterTableEntry->ValidBitLength >= 64) {
            if (Value == RegisterTableEntry->Value) {
              break;
            }
          } else {
            CurrentValue = BitFieldRead64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                             );
            if (CurrentValue == RegisterTableEntry->Value) {
              break;
            }
          }
        }

        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }

        break;
      //
      // MemoryMapped operations -- serialized across processors by a spinlock.
      //
      case MemoryMapped:
        AcquireSpinLock (&CpuFlags->MemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }

        break;

      case Semaphore:
        // Semaphore works logic like below:
        //
        //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
        //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
        //
        //  All threads (T0...Tn) waits in P() line and continues running
        //  together.
        //
        //
        //  T0             T1            ...           Tn
        //
        //  V(0...n)       V(0...n)      ...           V(0...n)
        //  n * P(0)       n * P(1)      ...           n * P(n)
        //
        ASSERT (
          (ApLocation != NULL) &&
          (CpuStatus->ThreadCountPerPackage != 0) &&
          (CpuStatus->ThreadCountPerCore != 0) &&
          (CpuFlags->CoreSemaphoreCount != NULL) &&
          (CpuFlags->PackageSemaphoreCount != NULL)
          );
        switch (RegisterTableEntry->Value) {
          case CoreDepType:
            SemaphorePtr       = CpuFlags->CoreSemaphoreCount;
            ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

            CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
            //
            // Get Offset info for the first thread in the core which current thread belongs to.
            //
            FirstThread   = CurrentCore * CpuStatus->MaxThreadCount;
            CurrentThread = FirstThread + ApLocation->Thread;

            //
            // Different cores may have different valid threads in them. If driver maintail clearly
            // thread index in different cores, the logic will be much complicated.
            // Here driver just simply records the max thread number in all cores and use it as expect
            // thread number for all cores.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current core. Maybe some threads are not valid in this core, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current core. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADs in current Core that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether all VALID THREADs (not all threads) in current core are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          case PackageDepType:
            SemaphorePtr          = CpuFlags->PackageSemaphoreCount;
            ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
            //
            // Get Offset info for the first thread in the package which current thread belongs to.
            //
            FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
            //
            // Get the possible threads count for current package.
            //
            CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

            //
            // Different packages may have different valid threads in them. If driver maintail clearly
            // thread index in different packages, the logic will be much complicated.
            // Here driver just simply records the max thread number in all packages and use it as expect
            // thread number for all packages.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current package. Maybe some threads are not valid in this package, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current package. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADS in current package that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether VALID THREADS (not all threads) in current package are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          default:
            break;
        }

        break;

      default:
        break;
    }
  }
}
479 | \r | |
93324390 ED |
480 | /**\r |
481 | \r | |
482 | Set Processor register for one AP.\r | |
e07e3ceb | 483 | \r |
93324390 ED |
484 | @param PreSmmRegisterTable Use pre Smm register table or register table.\r |
485 | \r | |
486 | **/\r | |
487 | VOID\r | |
488 | SetRegister (\r | |
053e878b | 489 | IN BOOLEAN PreSmmRegisterTable\r |
93324390 ED |
490 | )\r |
491 | {\r | |
053e878b MK |
492 | CPU_FEATURE_INIT_DATA *FeatureInitData;\r |
493 | CPU_REGISTER_TABLE *RegisterTable;\r | |
494 | CPU_REGISTER_TABLE *RegisterTables;\r | |
495 | UINT32 InitApicId;\r | |
496 | UINTN ProcIndex;\r | |
497 | UINTN Index;\r | |
93324390 | 498 | \r |
010753b7 LY |
499 | FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;\r |
500 | \r | |
93324390 | 501 | if (PreSmmRegisterTable) {\r |
010753b7 | 502 | RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;\r |
93324390 | 503 | } else {\r |
010753b7 | 504 | RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;\r |
93324390 | 505 | }\r |
053e878b | 506 | \r |
e992cc3f SZ |
507 | if (RegisterTables == NULL) {\r |
508 | return;\r | |
509 | }\r | |
93324390 | 510 | \r |
053e878b | 511 | InitApicId = GetInitialApicId ();\r |
93324390 | 512 | RegisterTable = NULL;\r |
053e878b | 513 | ProcIndex = (UINTN)-1;\r |
93324390 ED |
514 | for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {\r |
515 | if (RegisterTables[Index].InitialApicId == InitApicId) {\r | |
516 | RegisterTable = &RegisterTables[Index];\r | |
053e878b | 517 | ProcIndex = Index;\r |
93324390 ED |
518 | break;\r |
519 | }\r | |
520 | }\r | |
053e878b | 521 | \r |
93324390 ED |
522 | ASSERT (RegisterTable != NULL);\r |
523 | \r | |
010753b7 | 524 | if (FeatureInitData->ApLocation != 0) {\r |
93324390 ED |
525 | ProgramProcessorRegister (\r |
526 | RegisterTable,\r | |
010753b7 LY |
527 | (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,\r |
528 | &FeatureInitData->CpuStatus,\r | |
93324390 ED |
529 | &mCpuFlags\r |
530 | );\r | |
531 | } else {\r | |
532 | ProgramProcessorRegister (\r | |
533 | RegisterTable,\r | |
534 | NULL,\r | |
010753b7 | 535 | &FeatureInitData->CpuStatus,\r |
93324390 ED |
536 | &mCpuFlags\r |
537 | );\r | |
538 | }\r | |
539 | }\r | |
540 | \r | |
/**
  AP initialization before then after SMBASE relocation in the S3 boot path.

  Runs on each AP.  Restores MTRRs and pre-SMM registers, checks in with the
  BSP, waits for SMBASE relocation to finish, restores the remaining
  registers, and finally parks itself in the relocated hlt-loop code.
**/
VOID
InitializeAp (
  VOID
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];           // small local stack handed to the safe-state trampoline

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  //
  // Wait for BSP to signal SMM Base relocation done.
  //
  while (!mInitApsAfterSmmBaseReloc) {
    CpuPause ();
  }

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
  //
  TopOfStack  = (UINTN)Stack + sizeof (Stack);
  TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);     // align stack top downward
  CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
  TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
}
581 | \r | |
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.  It copies the AP rendezvous
  code to WorkingBuffer, patches its mode-switch jumps, and fills in the
  BSP<->AP exchange area that immediately follows the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // NOTE(review): the "+ 3" / "+ 2" offsets skip the jump instruction's
  // leading bytes so only its 32-bit target operand is overwritten; this
  // layout is fixed by the assembly behind AsmGetAddressMap() -- verify
  // against that source if it ever changes.
  //
  CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    //
    // Only patch the long-mode jump when the startup code provides one.
    //
    *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
  ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32)StartupVector;
  mExchangeInfo->Cr3         = (UINT32)(AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
}
631 | \r | |
/**
  The function is invoked before SMBASE relocation in S3 path to restores CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

**/
VOID
InitializeCpuBeforeRebase (
  VOID
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetRegister (TRUE);

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  //
  // Every processor except the BSP must check in.
  //
  mNumberToFinish           = (UINT32)(mNumberOfCpus - 1);
  mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;

  //
  // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
  //
  mInitApsAfterSmmBaseReloc = FALSE;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  //
  // Wait until every AP has decremented the counter in InitializeAp().
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
675 | \r | |
/**
  The function is invoked after SMBASE relocation in S3 path to restores CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

**/
VOID
InitializeCpuAfterRebase (
  VOID
  )
{
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
  } else {
    ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
  }

  mNumberToFinish = (UINT32)(mNumberOfCpus - 1);

  //
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  //
  // Wait until every AP has reached the safe hlt-loop (each decrements
  // mNumberToFinish there).
  //
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
0bdc9e75 SZ |
716 | \r |
717 | /**\r | |
718 | Restore SMM Configuration in S3 boot path.\r | |
719 | \r | |
720 | **/\r | |
721 | VOID\r | |
722 | RestoreSmmConfigurationInS3 (\r | |
723 | VOID\r | |
724 | )\r | |
725 | {\r | |
b10d5ddc SZ |
726 | if (!mAcpiS3Enable) {\r |
727 | return;\r | |
728 | }\r | |
729 | \r | |
0bdc9e75 SZ |
730 | //\r |
731 | // Restore SMM Configuration in S3 boot path.\r | |
732 | //\r | |
733 | if (mRestoreSmmConfigurationInS3) {\r | |
734 | //\r | |
735 | // Need make sure gSmst is correct because below function may use them.\r | |
736 | //\r | |
737 | gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r | |
738 | gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r | |
739 | gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r | |
740 | gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r | |
741 | gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r | |
742 | \r | |
743 | //\r | |
744 | // Configure SMM Code Access Check feature if available.\r | |
745 | //\r | |
746 | ConfigSmmCodeAccessCheck ();\r | |
747 | \r | |
748 | SmmCpuFeaturesCompleteSmmReadyToLock ();\r | |
749 | \r | |
750 | mRestoreSmmConfigurationInS3 = FALSE;\r | |
751 | }\r | |
752 | }\r | |
753 | \r | |
754 | /**\r | |
755 | Perform SMM initialization for all processors in the S3 boot path.\r | |
756 | \r | |
757 | For a native platform, MP initialization in the S3 boot path is also performed in this function.\r | |
758 | **/\r | |
759 | VOID\r | |
760 | EFIAPI\r | |
761 | SmmRestoreCpu (\r | |
762 | VOID\r | |
763 | )\r | |
764 | {\r | |
053e878b MK |
765 | SMM_S3_RESUME_STATE *SmmS3ResumeState;\r |
766 | IA32_DESCRIPTOR Ia32Idtr;\r | |
767 | IA32_DESCRIPTOR X64Idtr;\r | |
768 | IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r | |
769 | EFI_STATUS Status;\r | |
0bdc9e75 | 770 | \r |
96e1cba5 | 771 | DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));\r |
0bdc9e75 SZ |
772 | \r |
773 | mSmmS3Flag = TRUE;\r | |
774 | \r | |
0bdc9e75 SZ |
775 | //\r |
776 | // See if there is enough context to resume PEI Phase\r | |
777 | //\r | |
778 | if (mSmmS3ResumeState == NULL) {\r | |
96e1cba5 | 779 | DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));\r |
0bdc9e75 SZ |
780 | CpuDeadLoop ();\r |
781 | }\r | |
782 | \r | |
783 | SmmS3ResumeState = mSmmS3ResumeState;\r | |
784 | ASSERT (SmmS3ResumeState != NULL);\r | |
785 | \r | |
786 | if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r | |
787 | //\r | |
788 | // Save the IA32 IDT Descriptor\r | |
789 | //\r | |
053e878b | 790 | AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r |
0bdc9e75 SZ |
791 | \r |
792 | //\r | |
793 | // Setup X64 IDT table\r | |
794 | //\r | |
795 | ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r | |
053e878b MK |
796 | X64Idtr.Base = (UINTN)IdtEntryTable;\r |
797 | X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r | |
798 | AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);\r | |
0bdc9e75 SZ |
799 | \r |
800 | //\r | |
801 | // Setup the default exception handler\r | |
802 | //\r | |
803 | Status = InitializeCpuExceptionHandlers (NULL);\r | |
804 | ASSERT_EFI_ERROR (Status);\r | |
805 | \r | |
806 | //\r | |
807 | // Initialize Debug Agent to support source level debug\r | |
808 | //\r | |
809 | InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r | |
810 | }\r | |
811 | \r | |
812 | //\r | |
813 | // Skip initialization if mAcpiCpuData is not valid\r | |
814 | //\r | |
815 | if (mAcpiCpuData.NumberOfCpus > 0) {\r | |
816 | //\r | |
817 | // First time microcode load and restore MTRRs\r | |
818 | //\r | |
94744aa2 | 819 | InitializeCpuBeforeRebase ();\r |
0bdc9e75 SZ |
820 | }\r |
821 | \r | |
822 | //\r | |
823 | // Restore SMBASE for BSP and all APs\r | |
824 | //\r | |
825 | SmmRelocateBases ();\r | |
826 | \r | |
827 | //\r | |
828 | // Skip initialization if mAcpiCpuData is not valid\r | |
829 | //\r | |
830 | if (mAcpiCpuData.NumberOfCpus > 0) {\r | |
831 | //\r | |
832 | // Restore MSRs for BSP and all APs\r | |
833 | //\r | |
94744aa2 | 834 | InitializeCpuAfterRebase ();\r |
0bdc9e75 SZ |
835 | }\r |
836 | \r | |
837 | //\r | |
838 | // Set a flag to restore SMM configuration in S3 path.\r | |
839 | //\r | |
840 | mRestoreSmmConfigurationInS3 = TRUE;\r | |
841 | \r | |
053e878b MK |
842 | DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r |
843 | DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r | |
844 | DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r | |
845 | DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r | |
846 | DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r | |
0bdc9e75 SZ |
847 | \r |
848 | //\r | |
849 | // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r | |
850 | //\r | |
851 | if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r | |
96e1cba5 | 852 | DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r |
0bdc9e75 SZ |
853 | \r |
854 | SwitchStack (\r | |
855 | (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r | |
856 | (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r | |
857 | (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r | |
858 | (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r | |
859 | );\r | |
860 | }\r | |
861 | \r | |
862 | //\r | |
863 | // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r | |
864 | //\r | |
865 | if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r | |
96e1cba5 | 866 | DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r |
0bdc9e75 SZ |
867 | //\r |
868 | // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r | |
869 | //\r | |
870 | SaveAndSetDebugTimerInterrupt (FALSE);\r | |
871 | //\r | |
872 | // Restore IA32 IDT table\r | |
873 | //\r | |
053e878b | 874 | AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);\r |
0bdc9e75 SZ |
875 | AsmDisablePaging64 (\r |
876 | SmmS3ResumeState->ReturnCs,\r | |
877 | (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r | |
878 | (UINT32)SmmS3ResumeState->ReturnContext1,\r | |
879 | (UINT32)SmmS3ResumeState->ReturnContext2,\r | |
880 | (UINT32)SmmS3ResumeState->ReturnStackPointer\r | |
881 | );\r | |
882 | }\r | |
883 | \r | |
884 | //\r | |
885 | // Can not resume PEI Phase\r | |
886 | //\r | |
96e1cba5 | 887 | DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));\r |
0bdc9e75 SZ |
888 | CpuDeadLoop ();\r |
889 | }\r | |
890 | \r | |
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  The SMM_S3_RESUME_STATE structure is placed at the very start of the SMRAM
  region published via the gEfiAcpiVariableGuid HOB, where the S3 resume PEIM
  expects to find it.  This routine also reserves a below-4GB ACPI NVS buffer
  for the AP hlt-loop code executed on the S3 path.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  //
  // Nothing to prepare if the platform has S3 disabled.
  //
  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB the S3 resume path cannot locate the SMM S3 context;
    // dead-loop rather than continue with a broken S3 configuration.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __FUNCTION__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume-state structure lives at the base (CpuStart) of the
    // reserved SMRAM range described by the HOB.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    //
    // A 32KB stack for SmmRestoreCpu(); on allocation failure the size is
    // zeroed so the consumer can tell no stack is available.
    //
    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

    //
    // Record whether SMM runs in 64-bit or 32-bit mode, decided at build
    // time by the width of UINTN.
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }

    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *)(UINTN)Address;
}
973 | \r | |
974 | /**\r | |
e992cc3f | 975 | Copy register table from non-SMRAM into SMRAM.\r |
0bdc9e75 SZ |
976 | \r |
977 | @param[in] DestinationRegisterTableList Points to destination register table.\r | |
978 | @param[in] SourceRegisterTableList Points to source register table.\r | |
979 | @param[in] NumberOfCpus Number of CPUs.\r | |
980 | \r | |
981 | **/\r | |
982 | VOID\r | |
983 | CopyRegisterTable (\r | |
053e878b MK |
984 | IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r |
985 | IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r | |
986 | IN UINT32 NumberOfCpus\r | |
0bdc9e75 SZ |
987 | )\r |
988 | {\r | |
053e878b MK |
989 | UINTN Index;\r |
990 | CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r | |
0bdc9e75 SZ |
991 | \r |
992 | CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r | |
993 | for (Index = 0; Index < NumberOfCpus; Index++) {\r | |
e992cc3f SZ |
994 | if (DestinationRegisterTableList[Index].TableLength != 0) {\r |
995 | DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);\r | |
053e878b MK |
996 | RegisterTableEntry = AllocateCopyPool (\r |
997 | DestinationRegisterTableList[Index].AllocatedSize,\r | |
998 | (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry\r | |
999 | );\r | |
30d995ee JF |
1000 | ASSERT (RegisterTableEntry != NULL);\r |
1001 | DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;\r | |
0bdc9e75 SZ |
1002 | }\r |
1003 | }\r | |
1004 | }\r | |
1005 | \r | |
e992cc3f SZ |
1006 | /**\r |
1007 | Check whether the register table is empty or not.\r | |
1008 | \r | |
1009 | @param[in] RegisterTable Point to the register table.\r | |
1010 | @param[in] NumberOfCpus Number of CPUs.\r | |
1011 | \r | |
1012 | @retval TRUE The register table is empty.\r | |
1013 | @retval FALSE The register table is not empty.\r | |
1014 | **/\r | |
1015 | BOOLEAN\r | |
1016 | IsRegisterTableEmpty (\r | |
053e878b MK |
1017 | IN CPU_REGISTER_TABLE *RegisterTable,\r |
1018 | IN UINT32 NumberOfCpus\r | |
e992cc3f SZ |
1019 | )\r |
1020 | {\r | |
053e878b | 1021 | UINTN Index;\r |
e992cc3f SZ |
1022 | \r |
1023 | if (RegisterTable != NULL) {\r | |
1024 | for (Index = 0; Index < NumberOfCpus; Index++) {\r | |
1025 | if (RegisterTable[Index].TableLength != 0) {\r | |
1026 | return FALSE;\r | |
1027 | }\r | |
1028 | }\r | |
1029 | }\r | |
1030 | \r | |
1031 | return TRUE;\r | |
1032 | }\r | |
1033 | \r | |
/**
  Copy the data used to initialize processor register into SMRAM.

  Deep-copies the register tables, per-package/per-core thread counts, and
  AP location array referenced by the source structure into SMRAM-resident
  allocations, so none of them point at non-SMRAM memory during S3 resume.

  @param[in,out] CpuFeatureInitDataDst   Pointer to the destination CPU_FEATURE_INIT_DATA structure.
  @param[in]     CpuFeatureInitDataSrc   Pointer to the source CPU_FEATURE_INIT_DATA structure.

**/
VOID
CopyCpuFeatureInitDatatoSmram (
  IN OUT CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataDst,
  IN     CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataSrc
  )
{
  CPU_STATUS_INFORMATION  *CpuStatus;

  //
  // Only materialize SMRAM copies for register tables that have at least
  // one non-empty per-CPU entry; empty tables stay as zeroed fields.
  //
  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  //
  // Copy the CPU status header, then replace its embedded pointers with
  // SMRAM copies of the arrays they reference.
  //
  CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
  CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
    // One UINT32 thread count per package.
    CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                      sizeof (UINT32) * CpuStatus->PackageCount,
                                                                      (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
                                                                      );
    ASSERT (CpuStatus->ThreadCountPerPackage != 0);
  }

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
    // One UINT8 thread count per (package, core) pair.
    // NOTE(review): the source pointer is cast to (UINT32 *) while the buffer
    // is sized in UINT8 elements; harmless since AllocateCopyPool takes a
    // VOID* source, but the cast type looks inconsistent — confirm upstream.
    CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                   sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
                                                                   (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
                                                                   );
    ASSERT (CpuStatus->ThreadCountPerCore != 0);
  }

  if (CpuFeatureInitDataSrc->ApLocation != 0) {
    // Physical location (package/core/thread) for every CPU.
    CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                                                       mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                                                       (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
                                                                       );
    ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
  }
}
1098 | \r | |
/**
  Get ACPI CPU data.

  Copies the CPU S3 data published via PcdCpuS3DataAddress into SMRAM
  (MTRR settings, GDTR/IDTR profiles, the AP GDT/IDT/machine-check handler,
  and optionally the CPU feature initialization data) so the S3 resume path
  only consumes SMRAM-resident copies.

**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  //
  // Deep-copy the MTRR settings and the GDTR/IDTR descriptor structures
  // into SMRAM pool allocations; each ASSERT guards the allocation.
  //
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  //
  // One contiguous allocation laid out as: GDT | IDT | MC handler.
  // Limit fields are "size - 1", hence the "+ 1" on each.
  //
  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp                 = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  //
  // Repoint the descriptors/handler base at the SMRAM copies.
  //
  Gdtr->Base                             = (UINTN)GdtForAp;
  Idtr->Base                             = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));

  if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
    //
    // If the CPU features will not be initialized by CpuFeaturesPei module during
    // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
    // which will be consumed in SmmRestoreCpu during next S3 resume.
    //
    CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);

    CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;

    //
    // Semaphore containers sized one UINT32 per logical processor
    // (package * core * thread), used for core/package level
    // synchronization while programming registers.
    //
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);

    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);

    InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
  }
}
b10d5ddc SZ |
1199 | \r |
1200 | /**\r | |
1201 | Get ACPI S3 enable flag.\r | |
1202 | \r | |
1203 | **/\r | |
1204 | VOID\r | |
1205 | GetAcpiS3EnableFlag (\r | |
1206 | VOID\r | |
1207 | )\r | |
1208 | {\r | |
1209 | mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);\r | |
1210 | }\r |