]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
MdeModulePkg/VariableStandaloneMm: Set PcdFlashNvStorageVariableBase to Pcd
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / MpService.c
CommitLineData
529a5a86
MK
1/** @file\r
2SMM MP service implementation\r
3\r
70911f1f 4Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                         gSmiMtrrs;
// Physical address mask — NOTE(review): initialized outside this chunk; confirm in init code.
UINT64                                gPhyMask;
// MP synchronization data shared by the BSP and AP SMI handlers; allocated elsewhere.
SMM_DISPATCHER_MP_SYNC_DATA           *mSmmMpSyncData = NULL;
// Size in bytes of the mSmmMpSyncData buffer.
UINTN                                 mSmmMpSyncDataSize;
// Semaphores used for CPU rendezvous during SMI handling.
SMM_CPU_SEMAPHORES                    mSmmCpuSemaphores;
// Size of one semaphore slot — NOTE(review): presumably alignment-padded; confirm at allocation site.
UINTN                                 mSemaphoreSize;
// Spin lock used by the page-fault handling path.
SPIN_LOCK                             *mPFLock = NULL;
// Current SMM MP sync mode (traditional vs. relaxed-AP).
SMM_CPU_SYNC_MODE                     mCpuSmmSyncMode;
// TRUE when machine check is supported — NOTE(review): set during init, not visible here.
BOOLEAN                               mMachineCheckSupported = FALSE;
529a5a86
MK
25\r
26/**\r
27 Performs an atomic compare exchange operation to get semaphore.\r
28 The compare exchange operation must be performed using\r
29 MP safe mechanisms.\r
30\r
31 @param Sem IN: 32-bit unsigned integer\r
32 OUT: original integer - 1\r
33 @return Original integer - 1\r
34\r
35**/\r
36UINT32\r
37WaitForSemaphore (\r
38 IN OUT volatile UINT32 *Sem\r
39 )\r
40{\r
41 UINT32 Value;\r
42\r
9001b750 43 for (;;) {\r
529a5a86 44 Value = *Sem;\r
9001b750
LE
45 if (Value != 0 &&\r
46 InterlockedCompareExchange32 (\r
47 (UINT32*)Sem,\r
48 Value,\r
49 Value - 1\r
50 ) == Value) {\r
51 break;\r
52 }\r
53 CpuPause ();\r
54 }\r
529a5a86
MK
55 return Value - 1;\r
56}\r
57\r
58\r
59/**\r
60 Performs an atomic compare exchange operation to release semaphore.\r
61 The compare exchange operation must be performed using\r
62 MP safe mechanisms.\r
63\r
64 @param Sem IN: 32-bit unsigned integer\r
65 OUT: original integer + 1\r
66 @return Original integer + 1\r
67\r
68**/\r
69UINT32\r
70ReleaseSemaphore (\r
71 IN OUT volatile UINT32 *Sem\r
72 )\r
73{\r
74 UINT32 Value;\r
75\r
76 do {\r
77 Value = *Sem;\r
78 } while (Value + 1 != 0 &&\r
79 InterlockedCompareExchange32 (\r
80 (UINT32*)Sem,\r
81 Value,\r
82 Value + 1\r
83 ) != Value);\r
84 return Value + 1;\r
85}\r
86\r
87/**\r
88 Performs an atomic compare exchange operation to lock semaphore.\r
89 The compare exchange operation must be performed using\r
90 MP safe mechanisms.\r
91\r
92 @param Sem IN: 32-bit unsigned integer\r
93 OUT: -1\r
94 @return Original integer\r
95\r
96**/\r
97UINT32\r
98LockdownSemaphore (\r
99 IN OUT volatile UINT32 *Sem\r
100 )\r
101{\r
102 UINT32 Value;\r
103\r
104 do {\r
105 Value = *Sem;\r
106 } while (InterlockedCompareExchange32 (\r
107 (UINT32*)Sem,\r
108 Value, (UINT32)-1\r
109 ) != Value);\r
110 return Value;\r
111}\r
112\r
113/**\r
114 Wait all APs to performs an atomic compare exchange operation to release semaphore.\r
115\r
116 @param NumberOfAPs AP number\r
117\r
118**/\r
119VOID\r
120WaitForAllAPs (\r
121 IN UINTN NumberOfAPs\r
122 )\r
123{\r
124 UINTN BspIndex;\r
125\r
126 BspIndex = mSmmMpSyncData->BspIndex;\r
127 while (NumberOfAPs-- > 0) {\r
ed3d5ecb 128 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
529a5a86
MK
129 }\r
130}\r
131\r
132/**\r
133 Performs an atomic compare exchange operation to release semaphore\r
134 for each AP.\r
135\r
136**/\r
137VOID\r
138ReleaseAllAPs (\r
139 VOID\r
140 )\r
141{\r
142 UINTN Index;\r
529a5a86 143\r
70911f1f 144 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a 145 if (IsPresentAp (Index)) {\r
ed3d5ecb 146 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
529a5a86
MK
147 }\r
148 }\r
149}\r
150\r
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs the have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  //
  // The arrival counter can never exceed the number of CPUs known to SMM.
  //
  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Fast path: every CPU has already bumped the arrival counter.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Only consider populated slots (valid APIC ID) that are not yet present in SMM.
    //
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      //
      // An absent CPU is excused when the caller passed the matching exception
      // flag AND the platform SMM save-state register confirms that condition.
      //
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      //
      // This CPU is absent without an excuse.
      //
      return FALSE;
    }
  }

  return TRUE;
}
195\r
12c66382
ED
196/**\r
197 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL\r
7367cc6c 198\r
12c66382
ED
199 @retval TRUE Os enable lmce.\r
200 @retval FALSE Os not enable lmce.\r
201\r
202**/\r
203BOOLEAN\r
204IsLmceOsEnabled (\r
205 VOID\r
206 )\r
207{\r
208 MSR_IA32_MCG_CAP_REGISTER McgCap;\r
209 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;\r
210 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;\r
211\r
212 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);\r
213 if (McgCap.Bits.MCG_LMCE_P == 0) {\r
214 return FALSE;\r
215 }\r
216\r
217 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);\r
218 if (FeatureCtrl.Bits.LmceOn == 0) {\r
219 return FALSE;\r
220 }\r
221\r
222 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);\r
223 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);\r
224}\r
225\r
226/**\r
7367cc6c 227 Return if Local machine check exception signaled.\r
12c66382 228\r
7367cc6c 229 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was\r
12c66382
ED
230 delivered to only the logical processor.\r
231\r
232 @retval TRUE LMCE was signaled.\r
233 @retval FALSE LMCE was not signaled.\r
234\r
235**/\r
236BOOLEAN\r
237IsLmceSignaled (\r
238 VOID\r
239 )\r
240{\r
241 MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
242\r
243 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);\r
244 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);\r
245}\r
529a5a86
MK
246\r
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Only query LMCE state when machine check is supported; the MSR reads
  // inside IsLmceOsEnabled()/IsLmceSignaled() are not safe otherwise.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout. An OS-enabled, signaled LMCE short-circuits
  // the wait since the event is local to this processor.
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}
331\r
332\r
/**
  Replace OS MTRR's with SMI MTRR's.

  @param    CpuIndex             Processor Index
                                 NOTE(review): currently unused in this body;
                                 kept for interface compatibility with callers.

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  //
  // Disable SMRR before rewriting the MTRRs.
  //
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}
351\r
51dd408a
ED
352/**\r
353 Wheck whether task has been finished by all APs.\r
354\r
355 @param BlockMode Whether did it in block mode or non-block mode.\r
356\r
357 @retval TRUE Task has been finished by all APs.\r
358 @retval FALSE Task not has been finished by all APs.\r
359\r
360**/\r
361BOOLEAN\r
362WaitForAllAPsNotBusy (\r
363 IN BOOLEAN BlockMode\r
364 )\r
365{\r
366 UINTN Index;\r
367\r
70911f1f 368 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a
ED
369 //\r
370 // Ignore BSP and APs which not call in SMM.\r
371 //\r
372 if (!IsPresentAp(Index)) {\r
373 continue;\r
374 }\r
375\r
376 if (BlockMode) {\r
377 AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
378 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
379 } else {\r
380 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {\r
381 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
382 } else {\r
383 return FALSE;\r
384 }\r
385 }\r
386 }\r
387\r
388 return TRUE;\r
389}\r
390\r
391/**\r
392 Check whether it is an present AP.\r
393\r
394 @param CpuIndex The AP index which calls this function.\r
395\r
396 @retval TRUE It's a present AP.\r
397 @retval TRUE This is not an AP or it is not present.\r
398\r
399**/\r
400BOOLEAN\r
401IsPresentAp (\r
402 IN UINTN CpuIndex\r
403 )\r
404{\r
405 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&\r
406 *(mSmmMpSyncData->CpuData[CpuIndex].Present));\r
407}\r
408\r
51dd408a
ED
/**
  Clean up the status flags used during executing the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  //
  // The last AP to finish the procedure releases the token's spin lock,
  // signaling completion to any waiter on that token.
  //
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  //
  // Detach the token from this CPU regardless of whether it was the last AP.
  //
  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
430\r
/**
  Free the tokens in the maintained list.

  Tokens are not deallocated here; resetting FirstFreeToken simply marks the
  whole pre-allocated list as reusable for the next SMI.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}
445\r
529a5a86
MK
/**
  SMI handler for BSP.

  Orchestrates the whole SMI run: gathers APs (depending on sync mode),
  optionally coordinates MTRR save/replace/restore with them, runs the SMM
  Foundation entry point, then synchronizes everyone out of SMM and resets
  the shared state for the next SMI.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
684\r
/**
  SMI handler for AP.

  Waits for the BSP to show up (with a two-round timeout and a rescue SMI IPI),
  then participates in the MTRR save/replace handshake when required, services
  dispatched procedures until the BSP signals exit, and finally mirrors the
  BSP's teardown handshakes.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
                              NOTE(review): not referenced in this body; kept
                              for interface compatibility.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        // (decrement the arrival counter so the slot is released)
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    //
    // A non-NULL token means a caller is waiting for completion; release it.
    //
    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}
886\r
/**
  Create 4G PageTable in SMRAM.

  Builds a PAE-style page table identity-mapping the low 4GB with 2MB pages,
  optionally punching 4KB guard pages into the SMM stack range, and optionally
  hiding page 0 for NULL-pointer detection.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table: 1 PDPTE page + 4 PDE pages + one PTE page per
  // 2MB region that needs 4KB-granular guard pages.
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  // NOTE(review): the next statement is a no-op cast, apparently left over
  // from an earlier address-masking scheme; preserved as-is.
  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries: identity-map 0..4GB using 2MB pages.
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      //
      // Split this 2MB region into 4KB pages so individual guard pages can
      // be marked non-present.
      //
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
1012\r
51dd408a
ED
1013/**\r
1014 Checks whether the input token is the current used token.\r
1015\r
1016 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1017 BroadcastProcedure.\r
1018\r
1019 @retval TRUE The input token is the current used token.\r
1020 @retval FALSE The input token is not the current used token.\r
1021**/\r
1022BOOLEAN\r
1023IsTokenInUse (\r
1024 IN SPIN_LOCK *Token\r
1025 )\r
1026{\r
1027 LIST_ENTRY *Link;\r
1028 PROCEDURE_TOKEN *ProcToken;\r
1029\r
1030 if (Token == NULL) {\r
1031 return FALSE;\r
1032 }\r
1033\r
1034 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
d84f090f
ED
1035 //\r
1036 // Only search used tokens.\r
1037 //\r
1038 while (Link != gSmmCpuPrivate->FirstFreeToken) {\r
51dd408a
ED
1039 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
1040\r
d84f090f 1041 if (ProcToken->SpinLock == Token) {\r
51dd408a
ED
1042 return TRUE;\r
1043 }\r
1044\r
1045 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);\r
1046 }\r
1047\r
1048 return FALSE;\r
1049}\r
1050\r
/**
  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.

  Allocates one chunk of PcdCpuSmmMpTokenCountPerChunk tokens, initializes
  each token's spin lock, and appends all of them to the global token list.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  //
  // A zero chunk size would make the pool useless; dead-loop on release
  // builds too (the ASSERT only fires on debug builds).
  //
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}
1100\r
1101/**\r
1102 Get the free token.\r
1103\r
1104 If no free token, allocate new tokens then return the free one.\r
1105\r
e1879256
ED
1106 @param RunningApsCount The Running Aps count for this token.\r
1107\r
b948a496 1108 @retval return the first free PROCEDURE_TOKEN.\r
9caaa79d 1109\r
b948a496
ED
1110**/\r
1111PROCEDURE_TOKEN *\r
1112GetFreeToken (\r
1113 IN UINT32 RunningApsCount\r
1114 )\r
1115{\r
1116 PROCEDURE_TOKEN *NewToken;\r
51dd408a 1117\r
3fdc47c6
RN
1118 //\r
1119 // If FirstFreeToken meets the end of token list, enlarge the token list.\r
1120 // Set FirstFreeToken to the first free token.\r
1121 //\r
1122 if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {\r
1123 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
b948a496 1124 }\r
3fdc47c6
RN
1125 NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);\r
1126 gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);\r
51dd408a 1127\r
b948a496
ED
1128 NewToken->RunningApCount = RunningApsCount;\r
1129 AcquireSpinLock (NewToken->SpinLock);\r
51dd408a 1130\r
b948a496 1131 return NewToken;\r
51dd408a
ED
1132}\r
1133\r
1134/**\r
1135 Checks status of specified AP.\r
1136\r
1137 This function checks whether the specified AP has finished the task assigned\r
1138 by StartupThisAP(), and whether timeout expires.\r
1139\r
1140 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1141 BroadcastProcedure.\r
1142\r
1143 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().\r
1144 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.\r
1145**/\r
1146EFI_STATUS\r
1147IsApReady (\r
1148 IN SPIN_LOCK *Token\r
1149 )\r
1150{\r
1151 if (AcquireSpinLockOrFail (Token)) {\r
1152 ReleaseSpinLock (Token);\r
1153 return EFI_SUCCESS;\r
1154 }\r
1155\r
1156 return EFI_NOT_READY;\r
1157}\r
1158\r
529a5a86
MK
1159/**\r
1160 Schedule a procedure to run on the specified CPU.\r
1161\r
717fb604
JY
1162 @param[in] Procedure The address of the procedure to run\r
1163 @param[in] CpuIndex Target CPU Index\r
51dd408a
ED
1164 @param[in,out] ProcArguments The parameter to pass to the procedure\r
1165 @param[in] Token This is an optional parameter that allows the caller to execute the\r
1166 procedure in a blocking or non-blocking fashion. If it is NULL the\r
1167 call is blocking, and the call will not return until the AP has\r
1168 completed the procedure. If the token is not NULL, the call will\r
1169 return immediately. The caller can check whether the procedure has\r
1170 completed with CheckOnProcedure or WaitForProcedure.\r
1171 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish\r
1172 execution of Procedure, either for blocking or non-blocking mode.\r
1173 Zero means infinity. If the timeout expires before all APs return\r
1174 from Procedure, then Procedure on the failed APs is terminated. If\r
1175 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.\r
1176 If the timeout expires in non-blocking mode, the timeout determined\r
1177 can be through CheckOnProcedure or WaitForProcedure.\r
1178 Note that timeout support is optional. Whether an implementation\r
1179 supports this feature can be determined via the Attributes data\r
1180 member.\r
1181 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned\r
1182 by Procedure when it completes execution on the target AP, or with\r
1183 EFI_TIMEOUT if the Procedure fails to complete within the optional\r
1184 timeout. The implementation will update this variable with\r
1185 EFI_NOT_READY prior to starting Procedure on the target AP.\r
529a5a86
MK
1186\r
1187 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1188 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1189 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1190 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1191 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1192\r
1193**/\r
1194EFI_STATUS\r
717fb604 1195InternalSmmStartupThisAp (\r
51dd408a
ED
1196 IN EFI_AP_PROCEDURE2 Procedure,\r
1197 IN UINTN CpuIndex,\r
1198 IN OUT VOID *ProcArguments OPTIONAL,\r
1199 IN MM_COMPLETION *Token,\r
1200 IN UINTN TimeoutInMicroseconds,\r
1201 IN OUT EFI_STATUS *CpuStatus\r
529a5a86
MK
1202 )\r
1203{\r
a457823f
ED
1204 PROCEDURE_TOKEN *ProcToken;\r
1205\r
717fb604
JY
1206 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
1207 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
1208 return EFI_INVALID_PARAMETER;\r
1209 }\r
1210 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1211 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
529a5a86
MK
1212 return EFI_INVALID_PARAMETER;\r
1213 }\r
b7025df8
JF
1214 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {\r
1215 return EFI_INVALID_PARAMETER;\r
1216 }\r
717fb604
JY
1217 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
1218 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
1219 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
1220 }\r
1221 return EFI_INVALID_PARAMETER;\r
1222 }\r
1223 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
1224 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1225 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
1226 }\r
1227 return EFI_INVALID_PARAMETER;\r
1228 }\r
51dd408a
ED
1229 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
1230 return EFI_INVALID_PARAMETER;\r
1231 }\r
1232 if (Procedure == NULL) {\r
1233 return EFI_INVALID_PARAMETER;\r
1234 }\r
717fb604 1235\r
832c4c7a 1236 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
51dd408a 1237\r
529a5a86
MK
1238 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
1239 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
51dd408a 1240 if (Token != NULL) {\r
b948a496 1241 ProcToken= GetFreeToken (1);\r
a457823f
ED
1242 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;\r
1243 *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
51dd408a
ED
1244 }\r
1245 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;\r
1246 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {\r
1247 *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;\r
1248 }\r
1249\r
ed3d5ecb 1250 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
529a5a86 1251\r
51dd408a 1252 if (Token == NULL) {\r
ed3d5ecb
JF
1253 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
1254 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
529a5a86 1255 }\r
51dd408a
ED
1256\r
1257 return EFI_SUCCESS;\r
1258}\r
1259\r
1260/**\r
1261 Worker function to execute a caller provided function on all enabled APs.\r
1262\r
1263 @param[in] Procedure A pointer to the function to be run on\r
1264 enabled APs of the system.\r
1265 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for\r
1266 APs to return from Procedure, either for\r
1267 blocking or non-blocking mode.\r
1268 @param[in,out] ProcedureArguments The parameter passed into Procedure for\r
1269 all APs.\r
1270 @param[in,out] Token This is an optional parameter that allows the caller to execute the\r
1271 procedure in a blocking or non-blocking fashion. If it is NULL the\r
1272 call is blocking, and the call will not return until the AP has\r
1273 completed the procedure. If the token is not NULL, the call will\r
1274 return immediately. The caller can check whether the procedure has\r
1275 completed with CheckOnProcedure or WaitForProcedure.\r
1276 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned\r
1277 by Procedure when it completes execution on the target AP, or with\r
1278 EFI_TIMEOUT if the Procedure fails to complete within the optional\r
1279 timeout. The implementation will update this variable with\r
1280 EFI_NOT_READY prior to starting Procedure on the target AP.\r
1281\r
1282\r
1283 @retval EFI_SUCCESS In blocking mode, all APs have finished before\r
1284 the timeout expired.\r
1285 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched\r
1286 to all enabled APs.\r
1287 @retval others Failed to Startup all APs.\r
1288\r
1289**/\r
1290EFI_STATUS\r
1291InternalSmmStartupAllAPs (\r
1292 IN EFI_AP_PROCEDURE2 Procedure,\r
1293 IN UINTN TimeoutInMicroseconds,\r
1294 IN OUT VOID *ProcedureArguments OPTIONAL,\r
1295 IN OUT MM_COMPLETION *Token,\r
1296 IN OUT EFI_STATUS *CPUStatus\r
1297 )\r
1298{\r
1299 UINTN Index;\r
1300 UINTN CpuCount;\r
a457823f 1301 PROCEDURE_TOKEN *ProcToken;\r
51dd408a
ED
1302\r
1303 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {\r
1304 return EFI_INVALID_PARAMETER;\r
1305 }\r
1306 if (Procedure == NULL) {\r
1307 return EFI_INVALID_PARAMETER;\r
1308 }\r
1309\r
1310 CpuCount = 0;\r
70911f1f 1311 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a
ED
1312 if (IsPresentAp (Index)) {\r
1313 CpuCount ++;\r
1314\r
1315 if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {\r
1316 return EFI_INVALID_PARAMETER;\r
1317 }\r
1318\r
1319 if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {\r
1320 return EFI_NOT_READY;\r
1321 }\r
1322 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
1323 }\r
1324 }\r
1325 if (CpuCount == 0) {\r
1326 return EFI_NOT_STARTED;\r
1327 }\r
1328\r
1329 if (Token != NULL) {\r
b948a496 1330 ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);\r
a457823f
ED
1331 *Token = (MM_COMPLETION)ProcToken->SpinLock;\r
1332 } else {\r
1333 ProcToken = NULL;\r
51dd408a
ED
1334 }\r
1335\r
1336 //\r
1337 // Make sure all BUSY should be acquired.\r
1338 //\r
1339 // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.\r
1340 // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not\r
1341 // block mode.\r
1342 //\r
70911f1f 1343 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a
ED
1344 if (IsPresentAp (Index)) {\r
1345 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
1346 }\r
1347 }\r
1348\r
70911f1f 1349 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a
ED
1350 if (IsPresentAp (Index)) {\r
1351 mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;\r
1352 mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;\r
a457823f
ED
1353 if (ProcToken != NULL) {\r
1354 mSmmMpSyncData->CpuData[Index].Token = ProcToken;\r
51dd408a
ED
1355 }\r
1356 if (CPUStatus != NULL) {\r
1357 mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];\r
1358 if (mSmmMpSyncData->CpuData[Index].Status != NULL) {\r
1359 *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;\r
1360 }\r
1361 }\r
1362 } else {\r
1363 //\r
1364 // PI spec requirement:\r
1365 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.\r
1366 //\r
1367 if (CPUStatus != NULL) {\r
1368 CPUStatus[Index] = EFI_NOT_STARTED;\r
1369 }\r
a457823f
ED
1370\r
1371 //\r
1372 // Decrease the count to mark this processor(AP or BSP) as finished.\r
1373 //\r
1374 if (ProcToken != NULL) {\r
1375 WaitForSemaphore (&ProcToken->RunningApCount);\r
1376 }\r
51dd408a
ED
1377 }\r
1378 }\r
1379\r
1380 ReleaseAllAPs ();\r
1381\r
1382 if (Token == NULL) {\r
1383 //\r
1384 // Make sure all APs have completed their tasks.\r
1385 //\r
1386 WaitForAllAPsNotBusy (TRUE);\r
1387 }\r
1388\r
1389 return EFI_SUCCESS;\r
1390}\r
1391\r
1392/**\r
1393 ISO C99 6.5.2.2 "Function calls", paragraph 9:\r
1394 If the function is defined with a type that is not compatible with\r
1395 the type (of the expression) pointed to by the expression that\r
1396 denotes the called function, the behavior is undefined.\r
1397\r
1398 So add below wrapper function to convert between EFI_AP_PROCEDURE\r
1399 and EFI_AP_PROCEDURE2.\r
1400\r
1401 Wrapper for Procedures.\r
1402\r
1403 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.\r
1404\r
1405**/\r
1406EFI_STATUS\r
1407EFIAPI\r
1408ProcedureWrapper (\r
5ed4c46f 1409 IN VOID *Buffer\r
51dd408a
ED
1410 )\r
1411{\r
1412 PROCEDURE_WRAPPER *Wrapper;\r
1413\r
1414 Wrapper = Buffer;\r
1415 Wrapper->Procedure (Wrapper->ProcedureArgument);\r
1416\r
529a5a86
MK
1417 return EFI_SUCCESS;\r
1418}\r
1419\r
717fb604
JY
1420/**\r
1421 Schedule a procedure to run on the specified CPU in blocking mode.\r
1422\r
1423 @param[in] Procedure The address of the procedure to run\r
1424 @param[in] CpuIndex Target CPU Index\r
1425 @param[in, out] ProcArguments The parameter to pass to the procedure\r
1426\r
1427 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1428 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1429 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1430 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1431 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1432\r
1433**/\r
1434EFI_STATUS\r
1435EFIAPI\r
1436SmmBlockingStartupThisAp (\r
1437 IN EFI_AP_PROCEDURE Procedure,\r
1438 IN UINTN CpuIndex,\r
1439 IN OUT VOID *ProcArguments OPTIONAL\r
1440 )\r
1441{\r
51dd408a
ED
1442 PROCEDURE_WRAPPER Wrapper;\r
1443\r
1444 Wrapper.Procedure = Procedure;\r
1445 Wrapper.ProcedureArgument = ProcArguments;\r
1446\r
1447 //\r
1448 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1449 //\r
1450 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);\r
717fb604
JY
1451}\r
1452\r
1453/**\r
1454 Schedule a procedure to run on the specified CPU.\r
1455\r
1456 @param Procedure The address of the procedure to run\r
1457 @param CpuIndex Target CPU Index\r
1458 @param ProcArguments The parameter to pass to the procedure\r
1459\r
1460 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1461 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1462 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1463 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1464 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1465\r
1466**/\r
1467EFI_STATUS\r
1468EFIAPI\r
1469SmmStartupThisAp (\r
1470 IN EFI_AP_PROCEDURE Procedure,\r
1471 IN UINTN CpuIndex,\r
1472 IN OUT VOID *ProcArguments OPTIONAL\r
1473 )\r
1474{\r
51dd408a
ED
1475 MM_COMPLETION Token;\r
1476\r
1477 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
1478 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;\r
1479\r
1480 //\r
1481 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1482 //\r
1483 return InternalSmmStartupThisAp (\r
1484 ProcedureWrapper,\r
1485 CpuIndex,\r
1486 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
1487 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,\r
1488 0,\r
1489 NULL\r
1490 );\r
717fb604
JY
1491}\r
1492\r
f45f2d4a 1493/**\r
3eed6dda 1494 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
f45f2d4a
JY
1495 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.\r
1496\r
1497 NOTE: It might not be appreciated in runtime since it might\r
1498 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1499\r
1500 @param CpuIndex CPU Index\r
1501\r
1502**/\r
1503VOID\r
1504EFIAPI\r
1505CpuSmmDebugEntry (\r
1506 IN UINTN CpuIndex\r
1507 )\r
1508{\r
1509 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
7367cc6c 1510\r
f45f2d4a 1511 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
717fb604 1512 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
3eed6dda 1513 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
f45f2d4a
JY
1514 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1515 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
1516 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
1517 } else {\r
1518 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
1519 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
1520 }\r
1521 }\r
1522}\r
1523\r
1524/**\r
3eed6dda 1525 This function restores DR6 & DR7 to SMM save state.\r
f45f2d4a
JY
1526\r
1527 NOTE: It might not be appreciated in runtime since it might\r
1528 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1529\r
1530 @param CpuIndex CPU Index\r
1531\r
1532**/\r
1533VOID\r
1534EFIAPI\r
1535CpuSmmDebugExit (\r
1536 IN UINTN CpuIndex\r
1537 )\r
1538{\r
1539 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1540\r
1541 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
717fb604 1542 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
3eed6dda 1543 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
f45f2d4a
JY
1544 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1545 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
1546 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
1547 } else {\r
1548 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1549 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1550 }\r
1551 }\r
1552}\r
1553\r
529a5a86
MK
1554/**\r
1555 C function for SMI entry, each processor comes here upon SMI trigger.\r
1556\r
1557 @param CpuIndex CPU Index\r
1558\r
1559**/\r
1560VOID\r
1561EFIAPI\r
1562SmiRendezvous (\r
1563 IN UINTN CpuIndex\r
1564 )\r
1565{\r
f85d3ce2
JF
1566 EFI_STATUS Status;\r
1567 BOOLEAN ValidSmi;\r
1568 BOOLEAN IsBsp;\r
1569 BOOLEAN BspInProgress;\r
1570 UINTN Index;\r
1571 UINTN Cr2;\r
717fb604
JY
1572\r
1573 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
529a5a86
MK
1574\r
1575 //\r
37f9fea5
VN
1576 // Save Cr2 because Page Fault exception in SMM may override its value,\r
1577 // when using on-demand paging for above 4G memory.\r
529a5a86 1578 //\r
37f9fea5
VN
1579 Cr2 = 0;\r
1580 SaveCr2 (&Cr2);\r
529a5a86 1581\r
51dd408a
ED
1582 //\r
1583 // Call the user register Startup function first.\r
1584 //\r
1585 if (mSmmMpSyncData->StartupProcedure != NULL) {\r
1586 mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);\r
1587 }\r
1588\r
529a5a86
MK
1589 //\r
1590 // Perform CPU specific entry hooks\r
1591 //\r
1592 SmmCpuFeaturesRendezvousEntry (CpuIndex);\r
1593\r
1594 //\r
1595 // Determine if this is a valid SMI\r
1596 //\r
1597 ValidSmi = PlatformValidSmi();\r
1598\r
1599 //\r
1600 // Determine if BSP has been already in progress. Note this must be checked after\r
1601 // ValidSmi because BSP may clear a valid SMI source after checking in.\r
1602 //\r
fe3a75bc 1603 BspInProgress = *mSmmMpSyncData->InsideSmm;\r
529a5a86
MK
1604\r
1605 if (!BspInProgress && !ValidSmi) {\r
1606 //\r
1607 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not\r
1608 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI\r
1609 // status had been cleared by BSP and an existing SMI run has almost ended. (Note\r
1610 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there\r
1611 // is nothing we need to do.\r
1612 //\r
1613 goto Exit;\r
1614 } else {\r
1615 //\r
1616 // Signal presence of this processor\r
1617 //\r
fe3a75bc 1618 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
529a5a86
MK
1619 //\r
1620 // BSP has already ended the synchronization, so QUIT!!!\r
1621 //\r
1622\r
1623 //\r
1624 // Wait for BSP's signal to finish SMI\r
1625 //\r
fe3a75bc 1626 while (*mSmmMpSyncData->AllCpusInSync) {\r
529a5a86
MK
1627 CpuPause ();\r
1628 }\r
1629 goto Exit;\r
1630 } else {\r
1631\r
1632 //\r
1633 // The BUSY lock is initialized to Released state.\r
1634 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
1635 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
1636 // after AP's present flag is detected.\r
1637 //\r
ed3d5ecb 1638 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
529a5a86
MK
1639 }\r
1640\r
529a5a86
MK
1641 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1642 ActivateSmmProfile (CpuIndex);\r
1643 }\r
1644\r
1645 if (BspInProgress) {\r
1646 //\r
1647 // BSP has been elected. Follow AP path, regardless of ValidSmi flag\r
1648 // as BSP may have cleared the SMI status\r
1649 //\r
1650 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1651 } else {\r
1652 //\r
1653 // We have a valid SMI\r
1654 //\r
1655\r
1656 //\r
1657 // Elect BSP\r
1658 //\r
1659 IsBsp = FALSE;\r
1660 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1661 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {\r
1662 //\r
1663 // Call platform hook to do BSP election\r
1664 //\r
1665 Status = PlatformSmmBspElection (&IsBsp);\r
1666 if (EFI_SUCCESS == Status) {\r
1667 //\r
1668 // Platform hook determines successfully\r
1669 //\r
1670 if (IsBsp) {\r
1671 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;\r
1672 }\r
1673 } else {\r
1674 //\r
1675 // Platform hook fails to determine, use default BSP election method\r
1676 //\r
1677 InterlockedCompareExchange32 (\r
1678 (UINT32*)&mSmmMpSyncData->BspIndex,\r
1679 (UINT32)-1,\r
1680 (UINT32)CpuIndex\r
1681 );\r
1682 }\r
1683 }\r
1684 }\r
1685\r
1686 //\r
1687 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
1688 //\r
1689 if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
1690\r
1691 //\r
1692 // Clear last request for SwitchBsp.\r
1693 //\r
1694 if (mSmmMpSyncData->SwitchBsp) {\r
1695 mSmmMpSyncData->SwitchBsp = FALSE;\r
1696 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1697 mSmmMpSyncData->CandidateBsp[Index] = FALSE;\r
1698 }\r
1699 }\r
1700\r
1701 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1702 SmmProfileRecordSmiNum ();\r
1703 }\r
1704\r
1705 //\r
1706 // BSP Handler is always called with a ValidSmi == TRUE\r
1707 //\r
1708 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
529a5a86
MK
1709 } else {\r
1710 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1711 }\r
1712 }\r
1713\r
ed3d5ecb 1714 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
529a5a86
MK
1715\r
1716 //\r
1717 // Wait for BSP's signal to exit SMI\r
1718 //\r
fe3a75bc 1719 while (*mSmmMpSyncData->AllCpusInSync) {\r
529a5a86
MK
1720 CpuPause ();\r
1721 }\r
1722 }\r
1723\r
1724Exit:\r
1725 SmmCpuFeaturesRendezvousExit (CpuIndex);\r
37f9fea5 1726\r
529a5a86
MK
1727 //\r
1728 // Restore Cr2\r
1729 //\r
37f9fea5 1730 RestoreCr2 (Cr2);\r
529a5a86
MK
1731}\r
1732\r
51dd408a
ED
1733/**\r
1734 Allocate buffer for SpinLock and Wrapper function buffer.\r
1735\r
1736**/\r
1737VOID\r
1738InitializeDataForMmMp (\r
1739 VOID\r
1740 )\r
1741{\r
1742 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1743 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);\r
1744\r
1745 InitializeListHead (&gSmmCpuPrivate->TokenList);\r
b948a496 1746\r
3fdc47c6 1747 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
51dd408a
ED
1748}\r
1749\r
1d648531
JF
1750/**\r
1751 Allocate buffer for all semaphores and spin locks.\r
1752\r
1753**/\r
1754VOID\r
1755InitializeSmmCpuSemaphores (\r
1756 VOID\r
1757 )\r
1758{\r
1759 UINTN ProcessorCount;\r
1760 UINTN TotalSize;\r
1761 UINTN GlobalSemaphoresSize;\r
4e920581 1762 UINTN CpuSemaphoresSize;\r
1d648531
JF
1763 UINTN SemaphoreSize;\r
1764 UINTN Pages;\r
1765 UINTN *SemaphoreBlock;\r
1766 UINTN SemaphoreAddr;\r
1767\r
1768 SemaphoreSize = GetSpinLockProperties ();\r
1769 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1770 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
4e920581 1771 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
31fb3334 1772 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
1d648531
JF
1773 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
1774 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
1775 Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
1776 SemaphoreBlock = AllocatePages (Pages);\r
1777 ASSERT (SemaphoreBlock != NULL);\r
1778 ZeroMem (SemaphoreBlock, TotalSize);\r
1779\r
1780 SemaphoreAddr = (UINTN)SemaphoreBlock;\r
1781 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
1782 SemaphoreAddr += SemaphoreSize;\r
1783 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
1784 SemaphoreAddr += SemaphoreSize;\r
1785 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
1786 SemaphoreAddr += SemaphoreSize;\r
1787 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
1788 SemaphoreAddr += SemaphoreSize;\r
1789 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
1790 = (SPIN_LOCK *)SemaphoreAddr;\r
6c4c15fa 1791 SemaphoreAddr += SemaphoreSize;\r
6c4c15fa 1792\r
4e920581
JF
1793 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
1794 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
1795 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1796 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
1797 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1798 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
1799\r
fe3a75bc
JF
1800 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
1801 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
1802\r
1d648531
JF
1803 mSemaphoreSize = SemaphoreSize;\r
1804}\r
529a5a86
MK
1805\r
1806/**\r
1807 Initialize un-cacheable data.\r
1808\r
1809**/\r
1810VOID\r
1811EFIAPI\r
1812InitializeMpSyncData (\r
1813 VOID\r
1814 )\r
1815{\r
8b9311b7
JF
1816 UINTN CpuIndex;\r
1817\r
529a5a86 1818 if (mSmmMpSyncData != NULL) {\r
e78a2a49
JF
1819 //\r
1820 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
1821 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
1822 //\r
1823 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
529a5a86
MK
1824 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
1825 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1826 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1827 //\r
1828 // Enable BSP election by setting BspIndex to -1\r
1829 //\r
1830 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
1831 }\r
b43dd229 1832 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
1d648531 1833\r
8b9311b7
JF
1834 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
1835 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
1836 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
1837 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
1838 mSmmMpSyncData->AllCpusInSync != NULL);\r
1839 *mSmmMpSyncData->Counter = 0;\r
1840 *mSmmMpSyncData->InsideSmm = FALSE;\r
1841 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
1842\r
1843 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
1844 mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
1845 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
1846 mSmmMpSyncData->CpuData[CpuIndex].Run =\r
1847 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
1848 mSmmMpSyncData->CpuData[CpuIndex].Present =\r
1849 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
56e4a7d7
JF
1850 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;\r
1851 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;\r
1852 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
8b9311b7 1853 }\r
529a5a86
MK
1854 }\r
1855}\r
1856\r
1857/**\r
1858 Initialize global data for MP synchronization.\r
1859\r
3eb69b08
JY
1860 @param Stacks Base address of SMI stack buffer for all processors.\r
1861 @param StackSize Stack size for each processor in SMM.\r
1862 @param ShadowStackSize Shadow Stack size for each processor in SMM.\r
529a5a86
MK
1863\r
1864**/\r
1865UINT32\r
1866InitializeMpServiceData (\r
1867 IN VOID *Stacks,\r
3eb69b08
JY
1868 IN UINTN StackSize,\r
1869 IN UINTN ShadowStackSize\r
529a5a86
MK
1870 )\r
1871{\r
1872 UINT32 Cr3;\r
1873 UINTN Index;\r
529a5a86 1874 UINT8 *GdtTssTables;\r
529a5a86 1875 UINTN GdtTableStepSize;\r
ba40cb31
MK
1876 CPUID_VERSION_INFO_EDX RegEdx;\r
1877\r
1878 //\r
1879 // Determine if this CPU supports machine check\r
1880 //\r
1881 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
1882 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);\r
529a5a86 1883\r
8b9311b7
JF
1884 //\r
1885 // Allocate memory for all locks and semaphores\r
1886 //\r
1887 InitializeSmmCpuSemaphores ();\r
1888\r
d67b73cc
JF
1889 //\r
1890 // Initialize mSmmMpSyncData\r
1891 //\r
1892 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
1893 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1894 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
1895 ASSERT (mSmmMpSyncData != NULL);\r
b43dd229 1896 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
d67b73cc
JF
1897 InitializeMpSyncData ();\r
1898\r
529a5a86
MK
1899 //\r
1900 // Initialize physical address mask\r
1901 // NOTE: Physical memory above virtual address limit is not supported !!!\r
1902 //\r
1903 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
1904 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
1905 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
1906\r
1907 //\r
1908 // Create page tables\r
1909 //\r
1910 Cr3 = SmmInitPageTable ();\r
1911\r
fe5f1949 1912 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
529a5a86
MK
1913\r
1914 //\r
f12367a0 1915 // Install SMI handler for each CPU\r
529a5a86
MK
1916 //\r
1917 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
529a5a86
MK
1918 InstallSmiHandler (\r
1919 Index,\r
1920 (UINT32)mCpuHotPlugData.SmBase[Index],\r
3eb69b08 1921 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
529a5a86 1922 StackSize,\r
f12367a0
MK
1923 (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
1924 gcSmiGdtr.Limit + 1,\r
529a5a86
MK
1925 gcSmiIdtr.Base,\r
1926 gcSmiIdtr.Limit + 1,\r
1927 Cr3\r
1928 );\r
1929 }\r
1930\r
529a5a86
MK
1931 //\r
1932 // Record current MTRR settings\r
1933 //\r
26ab5ac3
MK
1934 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));\r
1935 MtrrGetAllMtrrs (&gSmiMtrrs);\r
529a5a86
MK
1936\r
1937 return Cr3;\r
1938}\r
1939\r
1940/**\r
1941\r
1942 Register the SMM Foundation entry point.\r
1943\r
1944 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1945 @param SmmEntryPoint SMM Foundation EntryPoint\r
1946\r
1947 @retval EFI_SUCCESS Successfully to register SMM foundation entry point\r
1948\r
1949**/\r
1950EFI_STATUS\r
1951EFIAPI\r
1952RegisterSmmEntry (\r
1953 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
1954 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
1955 )\r
1956{\r
1957 //\r
1958 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
1959 //\r
1960 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
1961 return EFI_SUCCESS;\r
1962}\r
51dd408a
ED
1963\r
1964/**\r
1965\r
1966 Register the SMM Foundation entry point.\r
1967\r
1968 @param[in] Procedure A pointer to the code stream to be run on the designated target AP\r
1969 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2\r
1970 with the related definitions of\r
1971 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.\r
1972 If caller may pass a value of NULL to deregister any existing\r
1973 startup procedure.\r
073f2ced 1974 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is\r
51dd408a
ED
1975 run by the AP. It is an optional common mailbox between APs and\r
1976 the caller to share information\r
1977\r
1978 @retval EFI_SUCCESS The Procedure has been set successfully.\r
1979 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.\r
1980\r
1981**/\r
1982EFI_STATUS\r
1983RegisterStartupProcedure (\r
073f2ced
SZ
1984 IN EFI_AP_PROCEDURE Procedure,\r
1985 IN OUT VOID *ProcedureArguments OPTIONAL\r
51dd408a
ED
1986 )\r
1987{\r
1988 if (Procedure == NULL && ProcedureArguments != NULL) {\r
1989 return EFI_INVALID_PARAMETER;\r
1990 }\r
1991 if (mSmmMpSyncData == NULL) {\r
1992 return EFI_NOT_READY;\r
1993 }\r
1994\r
1995 mSmmMpSyncData->StartupProcedure = Procedure;\r
1996 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;\r
1997\r
1998 return EFI_SUCCESS;\r
1999}\r