/** @file
SMM MP service implementation

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
BOOLEAN                                     mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

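  //
  // Spin while the count is zero, then try to decrement it with a
  // compare-exchange. The exchange retries whenever another processor
  // modified *Sem between the read and the CAS, keeping the decrement
  // MP safe.
  //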
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

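  //
  // If the count is MAX_UINT32, the semaphore has been locked down by
  // LockdownSemaphore(): Value + 1 wraps to 0, the loop exits without
  // writing, and the refused release is reported as a return value of 0.
  //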
  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

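  //
  // Force the count to MAX_UINT32. Once locked down, ReleaseSemaphore()
  // refuses to increment, so late processors can no longer check in; the
  // returned original count tells the caller how many CPUs had arrived.
  //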
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

/**
  Wait for all APs to perform an atomic compare exchange operation to release the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE   if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Checks whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE     The OS has enabled LMCE.
  @retval FALSE    The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception has been signaled.

  Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
  delivered to only this logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;
  BOOLEAN                           LmceEn;
  BOOLEAN                           LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRR's with SMI MTRR's.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;
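
  //
  // Flow summary (as implemented below): in Traditional sync mode, or when
  // SMM MTRR programming is required, the BSP first gathers all APs and
  // locks the arrival counter down; the per-CPU Run semaphores then step
  // every processor through the save-OS-MTRRs / program-SMM-MTRRs phases
  // before the SMM Foundation entry point runs. In Relaxed mode, APs are
  // gathered only after the BSP handlers have finished.
  //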

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

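  //
  // Dispatch loop: the Run semaphore serves two purposes here. It hands this
  // AP a procedure scheduled through SmmStartupThisAp(), and, once the BSP
  // clears InsideSmm, it signals that this AP should leave the loop and
  // begin the SMM exit handshake.
  //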
  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
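  //
  // Layout note (derived from the code below): the base allocation is five
  // pages - one page of PAE PDPTEs plus four page directories mapping 0-4GB
  // with 2MB entries. The PagesNeeded extra pages are used only when
  // PcdCpuSmmStackGuard is set: one 4KB-granularity page table for each 2MB
  // region that contains a guard page.
  //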
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but keep it present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}
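
//
// Usage sketch (editorial illustration, not part of the original module):
// an SMI handler running on the BSP can schedule a procedure on a checked-in
// AP. ReadApMcgCap() and the AP index 1 are assumptions invented for this
// example; any EFI_AP_PROCEDURE is dispatched the same way.
//
//   VOID
//   EFIAPI
//   ReadApMcgCap (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // Runs on the target AP inside SMM; Buffer is the caller's UINT64.
//     *(UINT64 *)Buffer = AsmReadMsr64 (MSR_IA32_MCG_CAP);
//   }
//
//   UINT64      McgCap;
//   EFI_STATUS  Status;
//
//   // Blocking variant: returns only after the AP has run the procedure.
//   Status = SmmBlockingStartupThisAp (ReadApMcgCap, 1, &McgCap);
//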

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //
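      //
      // (A return value of 0 means the BSP has already locked the arrival
      // counter down to MAX_UINT32, so this processor's increment was
      // refused.)
      //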

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

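  //
  // GetSpinLockProperties() reports the required spin-lock placement size
  // (typically one cache line), so each semaphore below gets its own slot
  // and processors spinning on different semaphores never contend on the
  // same cache line.
  //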
  SemaphoreSize  = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
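  //
  // Worked example: a CPU reporting 36 physical-address bits yields
  // gPhyMask = (1 << 36) - 1 = 0xFFFFFFFFF; the second mask then clears
  // the low 12 bits and everything at bit 48 and above, leaving
  // 0xFFFFFF000 (bits 12-35).
  //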

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       Successfully registered SMM foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}