/** @file
SMM MP service implementation

Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;
MM_COMPLETION                mSmmStartupThisApToken;

extern UINTN  mSmmShadowStackSize;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  for (;;) {
    Value = *Sem;
    if (Value != 0 &&
        InterlockedCompareExchange32 (
          (UINT32 *)Sem,
          Value,
          Value - 1
          ) == Value) {
      break;
    }
    CpuPause ();
  }
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);
  return Value;
}
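
//
// Illustrative sketch (not part of the original source): how the three
// primitives above cooperate on the arrival counter mSmmMpSyncData->Counter.
// Each CPU entering SMI increments the counter with ReleaseSemaphore(); the
// BSP freezes it at (UINT32)-1 with LockdownSemaphore(), so a late arrival's
// ReleaseSemaphore() saturates (Value + 1 == 0), performs no exchange, and
// returns 0, telling the latecomer that the rendezvous is closed:
//
//   if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
//     // Counter was locked down: BSP has already ended the synchronization.
//   }
//
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;  // BSP side
//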

/**
  Waits for all APs to perform an atomic compare exchange operation to release the semaphore.

  @param NumberOfAPs      Number of APs to wait for

**/
VOID
WaitForAllAPs (
  IN      UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}
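
//
// Illustrative sketch (not part of the original source): ReleaseAllAPs() and
// WaitForAllAPs() form the BSP half of a barrier; the AP half lives in
// APHandler(). A typical lock-step stage pairs up roughly like this:
//
//   BSP:                           AP (per CPU):
//   ReleaseAllAPs ();              WaitForSemaphore (CpuData[CpuIndex].Run);
//   ...do the stage's work...      ...do the stage's work...
//   WaitForAllAPs (ApCount);       ReleaseSemaphore (CpuData[BspIndex].Run);
//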

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param Exceptions     CPU Arrival exception flags.

  @retval TRUE  if all CPUs (except those covered by Exceptions) have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception has been signaled.

  Indicates (when set) that a local machine check exception was generated.
  This means the current machine-check event was delivered to only this
  logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex    Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking mode or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which have not called in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}
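
//
// Illustrative sketch (not part of the original source): the acquire/release
// pair above probes each AP's Busy lock -- if the lock can be taken, the AP
// is idle. A caller that cannot afford to block could poll like this:
//
//   while (!WaitForAllAPsNotBusy (FALSE)) {
//     //
//     // Some AP is still running its scheduled procedure; do other work.
//     //
//   }
//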

/**
  Check whether it is a present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used while executing the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Reset the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (Present bit left clear)
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
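
//
// Illustrative sketch (not part of the original source): how a 32-bit linear
// address is decomposed by the PAE layout built above. Bits [31:30] select
// one of the 4 PDPTEs, bits [29:21] select a 2MB PDE, and, for the split
// stack-guard ranges, bits [20:12] would select a 4KB PTE:
//
//   PdpteIndex = BitFieldRead32 ((UINT32)Address, 30, 31);  // 0..3
//   PdeIndex   = BitFieldRead32 ((UINT32)Address, 21, 29);  // 0..511
//   PteIndex   = BitFieldRead32 ((UINT32)Address, 12, 20);  // 0..511
//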

/**
  Checks whether the input token is a currently used token.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is a currently used token.
  @retval FALSE          The input token is not a currently used token.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Keep the SPIN_LOCK buffer separate from the PROCEDURE_TOKEN buffer because of the
  // alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate new tokens and then return a free one.

  @param RunningApsCount    The running AP count for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }
  NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
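
//
// Illustrative sketch (not part of the original source): a token's life cycle
// across the routines above. GetFreeToken() hands out a PROCEDURE_TOKEN with
// its SpinLock already held; each AP that finishes the procedure calls
// ReleaseToken(), and the last one (RunningApCount reaching 0) releases the
// SpinLock, which is exactly what IsApReady()/IsTokenInUse() observe:
//
//   ProcToken = GetFreeToken (1);                   // lock acquired here
//   *Token    = (MM_COMPLETION)ProcToken->SpinLock;
//   ... the AP runs the procedure, then calls ReleaseToken (CpuIndex);
//   ... callers poll IsApReady ((SPIN_LOCK *)*Token) until EFI_SUCCESS.
//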

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token             This parameter describes the token that was passed into DispatchProcedure or
                                BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAPs().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            detected through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token for such a case so the 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to start up all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above has already checked mSmmMpSyncData->CpuData[***].Busy for each AP,
  // the code below always uses AcquireSpinLock instead of AcquireSpinLockOrFail, even for
  // non-blocking mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
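
//
// Illustrative sketch (not part of the original source): a non-blocking
// broadcast through the routine above. MyApProcedure2, MyArg, and MAX_CPUS
// are hypothetical names; each present AP gets its own CPUStatus slot, and
// the caller polls the returned token:
//
//   EFI_STATUS     StatusArray[MAX_CPUS];   // at least mMaxNumberOfCpus entries
//   MM_COMPLETION  Token;
//
//   Status = InternalSmmStartupAllAPs (MyApProcedure2, 0, MyArg, &Token, StatusArray);
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();
//   }
//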

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add the wrapper function below to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer              Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure         = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
           0,
           NULL
           );
}
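
//
// Illustrative sketch (not part of the original source): how an SMI handler
// might use SmmStartupThisAp(). MyApProcedure and MyContext are hypothetical;
// EFI_AP_PROCEDURE takes a single VOID* buffer:
//
//   VOID
//   EFIAPI
//   MyApProcedure (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // Runs on the target AP inside SMM.
//   }
//
//   Status = SmmStartupThisAp (MyApProcedure, CpuIndex, &MyContext);
//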

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP is already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determined the BSP successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine, so use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffers for the procedure wrapper functions and the token list
  used by the MM MP protocol.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);
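
  //
  // The buffer is carved into SemaphoreSize-granular slots so that every
  // semaphore or lock gets its own slot (GetSpinLockProperties() typically
  // reports one cache line), preventing false sharing between CPUs:
  //
  //   SemaphoreBlock + 0                    : global fields, one slot each
  //                                           (Counter, InsideSmm, AllCpusInSync,
  //                                           PFLock, CodeAccessCheckLock)
  //   SemaphoreBlock + GlobalSemaphoresSize : per-CPU fields, ProcessorCount
  //                                           slots each (Busy, Run, Present)
  //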

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
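    //
    // Layout of the single allocation (sketch):
    //   [SMM_DISPATCHER_MP_SYNC_DATA][CpuData[NumberOfCpus]][CandidateBsp[NumberOfCpus]]
    //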
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks           Base address of SMI stack buffer for all processors.
  @param StackSize        Stack size for each processor in SMM.
  @param ShadowStackSize  Shadow Stack size for each processor in SMM.

  @return The CR3 value (page table base address) created for SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  gPhyMask  = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;
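  //
  // Worked example: with the 36-bit fallback above, LShiftU64 (1, 36) - 1 is
  // 0xFFFFFFFFF; clearing the low 12 bits leaves gPhyMask = 0xFFFFFF000,
  // i.e. the page-frame portion of a physical address.
  //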

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
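    //
    // Stacks is one contiguous buffer: CPU N's region (its normal stack
    // followed, when enabled, by its shadow stack) starts at
    // Stacks + (StackSize + ShadowStackSize) * N.
    //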
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**
  Register the SMM Foundation entry point.

  @param This           Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint  SMM Foundation EntryPoint

  @retval EFI_SUCCESS   The SMM Foundation entry point was successfully registered.

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**
  Register a startup procedure to be run on APs.

  @param[in]     Procedure           A pointer to the code stream to be run on the designated target AP
                                     of the system. Type EFI_AP_PROCEDURE is defined in the related
                                     definitions of EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                     The caller may pass a value of NULL to deregister any existing
                                     startup procedure.
  @param[in,out] ProcedureArguments  Allows the caller to pass a list of parameters to the code that is
                                     run by the AP. It is an optional common mailbox between APs and
                                     the caller to share information.

  @retval EFI_SUCCESS            The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER  Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY          The MP synchronization data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
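
//
// Usage sketch (hypothetical caller; MyApInitProc and mMyContext are
// illustrative names, not part of this module):
//
//   VOID
//   EFIAPI
//   MyApInitProc (
//     IN OUT VOID  *Buffer
//     )
//   {
//     //
//     // Per-AP work; Buffer is the ProcedureArguments mailbox.
//     //
//   }
//
//   Status = RegisterStartupProcedure (MyApInitProc, (VOID *)&mMyContext);
//
// Calling RegisterStartupProcedure (NULL, NULL) deregisters any existing
// startup procedure.
//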