/** @file
  SMM MP service implementation

  Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;
MM_COMPLETION                mSmmStartupThisApToken;

//
// Processor specified by mPackageFirstThreadIndex[PackageIndex] will do the package-scope register check.
//
UINT32  *mPackageFirstThreadIndex = NULL;

extern UINTN  mSmmShadowStackSize;

/**
  Performs an atomic compare exchange operation to get the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN:  32-bit unsigned integer
              OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  for ( ; ;) {
    Value = *Sem;
    if ((Value != 0) &&
        (InterlockedCompareExchange32 (
           (UINT32 *)Sem,
           Value,
           Value - 1
           ) == Value))
    {
      break;
    }

    CpuPause ();
  }

  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN:  32-bit unsigned integer
              OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);

  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem  IN:  32-bit unsigned integer
              OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);

  return Value;
}
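
//
// Usage sketch for the three primitives above (this mirrors SmiRendezvous()
// and BSPHandler() later in this file): *mSmmMpSyncData->Counter is the CPU
// arrival count. An arriving CPU checks in with ReleaseSemaphore(); a return
// value of 0 means the counter was already locked down to (UINT32)-1, i.e.
// the BSP has ended the synchronization. The BSP freezes arrivals with
// LockdownSemaphore() and derives the AP count from the returned value:
//
//   if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
//     // Too late: spin until AllCpusInSync clears, then exit the SMI.
//   }
//
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;   // BSP side
//
// WaitForSemaphore() refuses to decrement through zero, so it doubles as a
// "wait for signal" operation on the per-CPU Run counters.
//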

/**
  Wait for all APs to perform an atomic compare exchange operation releasing the semaphore.

  @param NumberOfAPs  Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Check whether the processor at CpuIndex should perform the package-level
  register programming during System Management Mode initialization.

  The processor specified by mPackageFirstThreadIndex[PackageIndex]
  will do the package-scope register programming.

  @param[in] CpuIndex  Processor Index.

  @retval TRUE   Perform the package-level register programming.
  @retval FALSE  Don't perform the package-level register programming.

**/
BOOLEAN
IsPackageFirstThread (
  IN UINTN  CpuIndex
  )
{
  UINT32  PackageIndex;

  PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;

  ASSERT (mPackageFirstThreadIndex != NULL);

  //
  // Set the value of mPackageFirstThreadIndex[PackageIndex].
  // The package-scope registers are checked by the first processor (CpuIndex) in the package.
  //
  // If mPackageFirstThreadIndex[PackageIndex] equals (UINT32)-1, update it
  // to the current CpuIndex. If it doesn't equal (UINT32)-1, don't change it.
  //
  if (mPackageFirstThreadIndex[PackageIndex] == (UINT32)-1) {
    mPackageFirstThreadIndex[PackageIndex] = (UINT32)CpuIndex;
  }

  return (BOOLEAN)(mPackageFirstThreadIndex[PackageIndex] == CpuIndex);
}

/**
  Returns the number of SMM Delayed, Blocked and Disabled threads.

  @param[in,out] DelayedCount   The number of SMM Delayed threads.
  @param[in,out] BlockedCount   The number of SMM Blocked threads.
  @param[in,out] DisabledCount  The number of SMM Disabled threads.

**/
VOID
GetSmmDelayedBlockedDisabledCount (
  IN OUT UINT32  *DelayedCount,
  IN OUT UINT32  *BlockedCount,
  IN OUT UINT32  *DisabledCount
  )
{
  UINTN  Index;

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (IsPackageFirstThread (Index)) {
      if (DelayedCount != NULL) {
        *DelayedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);
      }

      if (BlockedCount != NULL) {
        *BlockedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);
      }

      if (DisabledCount != NULL) {
        *DisabledCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);
      }
    }
  }
}

/**
  Checks if all CPUs (except Blocked & Disabled) have checked in for this SMI run.

  @retval TRUE   All CPUs have checked in.
  @retval FALSE  At least one normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmExceptBlockedDisabled (
  VOID
  )
{
  UINT32  BlockedCount;
  UINT32  DisabledCount;

  BlockedCount  = 0;
  DisabledCount = 0;

  //
  // Check to make sure mSmmMpSyncData->Counter is valid and not locked.
  //
  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Check whether all CPUs are in SMM.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  //
  // Check for the Blocked & Disabled exceptions case.
  //
  GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);

  //
  // *mSmmMpSyncData->Counter might be updated by all APs concurrently, so its
  // value can change dynamically. If some APs enter the SMI after the BlockedCount &
  // DisabledCount check, then *mSmmMpSyncData->Counter will be increased, leading to
  // *mSmmMpSyncData->Counter + BlockedCount + DisabledCount > mNumberOfCpus.
  // Since BlockedCount & DisabledCount are local variables, this is acceptable here,
  // where we only check whether all CPUs are in SMM.
  //
  if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
    return TRUE;
  }

  return FALSE;
}
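
//
// Worked example of the check above (illustrative numbers): with
// mNumberOfCpus == 8, two threads reporting Blocked and one reporting
// Disabled, the arrival counter only has to reach 5 for this SMI run to be
// considered complete, since 5 + 2 + 1 >= 8. Late arrivals can push the sum
// past mNumberOfCpus, which is why ">=" is used rather than "==".
//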

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception has been signaled.

  Indicates (when set) that a local machine check exception was generated,
  i.e. the current machine-check event was delivered only to this logical
  processor.

  @retval TRUE   LMCE was signaled.
  @retval FALSE  LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering SMM,
  except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;
  UINT32   DelayedCount;
  UINT32   BlockedCount;

  DelayedCount = 0;
  BlockedCount = 0;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementors should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
       )
  {
    mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
    if (mSmmMpSyncData->AllApArrivedWithException) {
      break;
    }

    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed APs may have just come out of the delayed state. Blocked APs may have just been brought out of the blocked state by some
  //    AP running normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the
  //    delayed / blocked state, they enter SMI immediately without executing instructions in normal mode. Note the traditional flow
  //    requires that no APs do normal mode work while SMI handling is on-going.
  // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in the
  //    SMI-disabled state will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing
  //    normal mode work while SMI handling is on-going.
  // d) We don't add code to check the SMI disabling status to skip sending IPIs to SMI disabled APs, because:
  //    - In the traditional flow, SMI disabling is discouraged.
  //    - In the relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer);
         )
    {
      mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
      if (mSmmMpSyncData->AllApArrivedWithException) {
        break;
      }

      CpuPause ();
    }
  }

  if (!mSmmMpSyncData->AllApArrivedWithException) {
    //
    // Check for the Blocked & Delayed case.
    //
    GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);
    DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));
  }

  return;
}
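
//
// The flow above is a two-round rendezvous; in outline:
//
//   poll AllCpusInSmmExceptBlockedDisabled() until the 1st timeout (or LMCE);
//   if (*Counter < mNumberOfCpus) {
//     SendSmiIpi() to every non-present CPU with a valid APIC ID;
//     poll AllCpusInSmmExceptBlockedDisabled() until the 2nd timeout;
//   }
//   if (still not all arrived) { log the Delayed/Blocked counts; }
//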

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex  Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode  Whether to check in blocking or non-blocking mode.

  @retval TRUE   The task has been finished by all APs.
  @retval FALSE  The task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param CpuIndex  The AP index which calls this function.

  @retval TRUE   It's a present AP.
  @retval FALSE  This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during execution of the procedure.

  @param CpuIndex  The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Free the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset FirstFreeToken to the beginning of the token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param CpuIndex  BSP processor Index
  @param SyncMode  SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear the platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before the SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // In Traditional Sync Mode, or if MTRRs need to be configured: gather all available APs.
  //
  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs that it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set the MTRRs to avoid a race condition for
      // threads in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to the Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler; the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to reset the states/semaphores for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of the BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the token buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning the BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter                  = 0;
  *mSmmMpSyncData->AllCpusInSync            = FALSE;
  mSmmMpSyncData->AllApArrivedWithException = FALSE;
}

/**
  SMI handler for AP.

  @param CpuIndex  AP processor Index.
  @param ValidSmi  Indicates whether the current SMI is a valid SMI.
  @param SyncMode  SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       )
  {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known.
      // This AP is already in SMI but the BSP is not, so try to bring the BSP into SMM.
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring the BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock the BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           )
      {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since the BSP is unable to enter SMM,
        // and signal the completion of this AP.
        // Reduce the mSmmMpSyncData->Counter!
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // The BSP index is unknown. Give up without sending an IPI to the BSP.
      // Reduce the mSmmMpSyncData->Counter!
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify the BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from the BSP to back up MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal to the BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal to the BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if the BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify the BSP that this AP is ready to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from the BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify the BSP that this AP is ready to reset the states/semaphores for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from the BSP to reset the states/semaphores for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset the states/semaphores for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify the BSP that this AP is ready to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}
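
//
// Summary of the Run-semaphore handshake between the two handlers above:
// the BSP signals APs through their per-CPU Run semaphores, and APs answer
// on the BSP's own Run semaphore. Each ReleaseAllAPs() in BSPHandler() is
// consumed by one WaitForSemaphore (CpuData[CpuIndex].Run) per AP in
// APHandler(), and each WaitForAllAPs (ApCount) in BSPHandler() drains one
// ReleaseSemaphore (CpuData[BspIndex].Run) per AP. Every barrier in one
// handler therefore has an explicit counterpart in the other.
//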

/**
  Create a 4G PageTable in SMRAM.

  @param[in] Is32BitPageTable  Whether the page table is 32-bit PAE
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for the known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for the known good stack and the stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded    = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }

  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte       = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }

  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        PageAddress += EFI_PAGE_SIZE;
      }

      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte    = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0]      = PageAddress | mAddressEncMask; // Hide page 0 but leave the rest present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index]   = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
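
//
// Size note for the allocation above: the "5 + PagesNeeded" pages break down
// as 1 PDPT page plus 4 PD pages (4 * 512 entries * 2MB = 4GB of large-page
// mappings), plus one 4KB page-table page per 2MB region overlapping the
// stack array when PcdCpuSmmStackGuard is set. For example (illustrative
// numbers), a stack array touching three 2MB regions gives PagesNeeded == 3
// and an 8-page allocation in total.
//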

/**
  Checks whether the input token is a token currently in use.

  @param[in] Token  This parameter describes the token that was passed into DispatchProcedure or
                    BroadcastProcedure.

  @retval TRUE   The input token is currently in use.
  @retval FALSE  The input token is not currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate a buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }

  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Keep the SPIN_LOCK and PROCEDURE_TOKEN buffers separate because of the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate a new token chunk and return the first free one.

  @param RunningApsCount  The count of running APs for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of the token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }

  NewToken                       = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
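
//
// Token lifecycle, as implemented above and used by the Startup functions
// below: GetFreeToken(N) hands out a pre-initialized token with its SpinLock
// held and RunningApCount == N. Each AP that finishes the procedure calls
// ReleaseToken(), which decrements RunningApCount and releases the SpinLock
// once it reaches 0. IsApReady() and IsTokenInUse() observe completion by
// probing that SpinLock, and ResetTokens() recycles the entire list on SMI
// exit by rewinding FirstFreeToken.
//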

/**
  Checks the status of the specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether the timeout has expired.

  @param[in] Token  This parameter describes the token that was passed into DispatchProcedure or
                    BroadcastProcedure.

  @retval EFI_SUCCESS    The specified AP has finished the task assigned by StartupThisAPs().
  @retval EFI_NOT_READY  The specified AP has not finished the task and the timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure              The address of the procedure to run
  @param[in]     CpuIndex               Target CPU Index
  @param[in,out] ProcArguments          The parameter to pass to the procedure
  @param[in]     Token                  This is an optional parameter that allows the caller to execute the
                                        procedure in a blocking or non-blocking fashion. If it is NULL the
                                        call is blocking, and the call will not return until the AP has
                                        completed the procedure. If the token is not NULL, the call will
                                        return immediately. The caller can check whether the procedure has
                                        completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds  Indicates the time limit in microseconds for the APs to finish
                                        execution of Procedure, either for blocking or non-blocking mode.
                                        Zero means infinity. If the timeout expires before all APs return
                                        from Procedure, then Procedure on the failed APs is terminated. If
                                        the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                        If the timeout expires in non-blocking mode, the timeout can be
                                        determined through CheckOnProcedure or WaitForProcedure.
                                        Note that timeout support is optional. Whether an implementation
                                        supports this feature can be determined via the Attributes data
                                        member.
  @param[in,out] CpuStatus              This optional pointer may be used to get the status code returned
                                        by Procedure when it completes execution on the target AP, or with
                                        EFI_TIMEOUT if the Procedure fails to complete within the optional
                                        timeout. The implementation will update this variable with
                                        EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER  CpuIndex not valid
  @retval EFI_INVALID_PARAMETER  CpuIndex specifying the BSP
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS            The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN     EFI_AP_PROCEDURE2  Procedure,
  IN     UINTN              CpuIndex,
  IN OUT VOID               *ProcArguments OPTIONAL,
  IN     MM_COMPLETION      *Token,
  IN     UINTN              TimeoutInMicroseconds,
  IN OUT EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, the caller wants to start the AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to the caller from SmmStartupThisAp().
      // The caller needs to use its own implementation-specific way to query the
      // completion status.
      //
      // There is no need to allocate a token for such a case, so these 3 overheads
      // can be avoided:
      // 1. Calling AllocateTokenBuffer() when there is no free token.
      // 2. Getting a free token from the token buffer.
      // 3. Calling ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller-provided function on all enabled APs.

  @param[in]     Procedure              A pointer to the function to be run on
                                        enabled APs of the system.
  @param[in]     TimeoutInMicroseconds  Indicates the time limit in microseconds for
                                        APs to return from Procedure, either for
                                        blocking or non-blocking mode.
  @param[in,out] ProcedureArguments     The parameter passed into Procedure for
                                        all APs.
  @param[in,out] Token                  This is an optional parameter that allows the caller to execute the
                                        procedure in a blocking or non-blocking fashion. If it is NULL the
                                        call is blocking, and the call will not return until the AP has
                                        completed the procedure. If the token is not NULL, the call will
                                        return immediately. The caller can check whether the procedure has
                                        completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus              This optional pointer may be used to get the status code returned
                                        by Procedure when it completes execution on the target AP, or with
                                        EFI_TIMEOUT if the Procedure fails to complete within the optional
                                        timeout. The implementation will update this variable with
                                        EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS  In blocking mode, all APs have finished before
                       the timeout expired.
  @retval EFI_SUCCESS  In non-blocking mode, the function has been dispatched
                       to all enabled APs.
  @retval others       Failed to start all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN     EFI_AP_PROCEDURE2  Procedure,
  IN     UINTN              TimeoutInMicroseconds,
  IN OUT VOID               *ProcedureArguments OPTIONAL,
  IN OUT MM_COMPLETION      *Token,
  IN OUT EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each
  // present AP, the code here always uses AcquireSpinLock instead of
  // AcquireSpinLockOrFail, so the acquisition does not fail.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So the wrapper function below is added to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in] Buffer  Pointer to a PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]      Procedure      The address of the procedure to run
  @param[in]      CpuIndex       Target CPU Index
  @param[in, out] ProcArguments  The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER  CpuIndex not valid
  @retval EFI_INVALID_PARAMETER  CpuIndex specifying the BSP
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS            The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN     UINTN             CpuIndex,
  IN OUT VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure         = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param Procedure      The address of the procedure to run
  @param CpuIndex       Target CPU Index
  @param ProcArguments  The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER  CpuIndex not valid
  @retval EFI_INVALID_PARAMETER  CpuIndex specifying the BSP
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS            The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN     UINTN             CpuIndex,
  IN OUT VOID              *ProcArguments OPTIONAL
  )
{
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
           0,
           NULL
           );
}
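
//
// Minimal usage sketch for the entry points above (hypothetical caller, not
// part of this driver): run DoWork() on AP 1 in blocking mode, where DoWork
// matches the EFI_AP_PROCEDURE signature.
//
//   VOID
//   EFIAPI
//   DoWork (
//     IN OUT VOID  *Context    // hypothetical procedure and context
//     )
//   {
//     // ... work to perform on the AP while it sits in SMM ...
//   }
//
//   Status = SmmBlockingStartupThisAp (DoWork, 1, &MyContext);
//   if (Status == EFI_INVALID_PARAMETER) {
//     // AP 1 is the BSP, is busy, or did not check in to this SMI.
//   }
//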

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when using on-demand paging for memory above 4G.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means that when we sampled the ValidSmi flag, the SMI
    // status had not been cleared by the BSP in a new SMI run (so we have a truly
    // invalid SMI), or the SMI status had been cleared by the BSP and an existing
    // SMI run has almost ended. (Note we sampled the ValidSmi flag BEFORE judging
    // the BSP-in-progress status.) In both cases, there is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal the presence of this processor.
    // mSmmMpSyncData->Counter is increased here!
    // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means the BSP has already ended the synchronization.
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // The BSP has already ended the synchronization, so QUIT!!!
      // This AP is too late to enter the SMI since the BSP has already ended the synchronization!!!
      //

      //
      // Wait for the BSP's signal to finish the SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to the Released state.
      // This needs to be done early enough to be ready for the BSP's SmmStartupThisAp() call.
      // E.g., with the Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after the AP's Present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // The BSP has been elected. Follow the AP path, regardless of the ValidSmi flag,
      // as the BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determined successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine; use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear the last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for the BSP's signal to exit the SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1885
1886 /**
1887 Initialize PackageBsp Info. Processor specified by mPackageFirstThreadIndex[PackageIndex]
1888 will do the package-scope register programming. Set default CpuIndex to (UINT32)-1, which
1889 means not specified yet.
1890
1891 **/
1892 VOID
1893 InitPackageFirstThreadIndexInfo (
1894 VOID
1895 )
1896 {
1897 UINT32 Index;
1898 UINT32 PackageId;
1899 UINT32 PackageCount;
1900
1901 PackageId = 0;
1902 PackageCount = 0;
1903
1904 //
1905 // Count the number of package, set to max PackageId + 1
1906 //
1907 for (Index = 0; Index < mNumberOfCpus; Index++) {
1908 if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {
1909 PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;
1910 }
1911 }
1912
1913 PackageCount = PackageId + 1;
1914
1915 mPackageFirstThreadIndex = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);
1916 ASSERT (mPackageFirstThreadIndex != NULL);
1917 if (mPackageFirstThreadIndex == NULL) {
1918 return;
1919 }
1920
1921 //
1922 // Set default CpuIndex to (UINT32)-1, which means not specified yet.
1923 //
1924 SetMem32 (mPackageFirstThreadIndex, sizeof (UINT32) * PackageCount, (UINT32)-1);
1925 }
1926
1927 /**
1928 Allocate buffer for SpinLock and Wrapper function buffer.
1929
1930 **/
1931 VOID
1932 InitializeDataForMmMp (
1933 VOID
1934 )
1935 {
1936 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1937 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1938
1939 InitializeListHead (&gSmmCpuPrivate->TokenList);
1940
1941 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1942 }
1943
1944 /**
1945 Allocate buffer for all semaphores and spin locks.
1946
1947 **/
1948 VOID
1949 InitializeSmmCpuSemaphores (
1950 VOID
1951 )
1952 {
1953 UINTN ProcessorCount;
1954 UINTN TotalSize;
1955 UINTN GlobalSemaphoresSize;
1956 UINTN CpuSemaphoresSize;
1957 UINTN SemaphoreSize;
1958 UINTN Pages;
1959 UINTN *SemaphoreBlock;
1960 UINTN SemaphoreAddr;
1961
1962 SemaphoreSize = GetSpinLockProperties ();
1963 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1964 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1965 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1966 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1967 DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1968 DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1969 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1970 SemaphoreBlock = AllocatePages (Pages);
1971 ASSERT (SemaphoreBlock != NULL);
1972 ZeroMem (SemaphoreBlock, TotalSize);
1973
1974 SemaphoreAddr = (UINTN)SemaphoreBlock;
1975 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1976 SemaphoreAddr += SemaphoreSize;
1977 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1978 SemaphoreAddr += SemaphoreSize;
1979 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1980 SemaphoreAddr += SemaphoreSize;
1981 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1982 SemaphoreAddr += SemaphoreSize;
1983 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1984 = (SPIN_LOCK *)SemaphoreAddr;
1985 SemaphoreAddr += SemaphoreSize;
1986
1987 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1988 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1989 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1990 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1991 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1992 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1993
1994 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1995 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1996
1997 mSemaphoreSize = SemaphoreSize;
1998 }
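
//
// Resulting layout of the allocated block (each semaphore occupies one
// SemaphoreSize-sized slot, i.e. its own cache line, per GetSpinLockProperties()):
//
//   SemaphoreBlock
//     Global : Counter | InsideSmm | AllCpusInSync | PFLock | CodeAccessCheckLock
//     Per-CPU: Busy[0..N-1] | Run[0..N-1] | Present[0..N-1]   (N = ProcessorCount)
//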
1999
2000 /**
2001 Initialize uncacheable data used for MP synchronization.
2002
2003 **/
2004 VOID
2005 EFIAPI
2006 InitializeMpSyncData (
2007 VOID
2008 )
2009 {
2010 UINTN CpuIndex;
2011
2012 if (mSmmMpSyncData != NULL) {
2013 //
2014 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
2015 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
2016 //
2017 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
2018 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
2019 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
2020 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
2021 //
2022 // Enable BSP election by setting BspIndex to -1
2023 //
2024 mSmmMpSyncData->BspIndex = (UINT32)-1;
2025 }
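//
// Sketch (assumption, simplified from the SMI rendezvous code): with BspIndex
// preset to (UINT32)-1, the first CPU whose InterlockedCompareExchange32
// swaps (UINT32)-1 for its own index wins the BSP election for that SMI.
//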
2026
2027 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
2028
2029 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
2030 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
2031 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
2032 ASSERT (
2033 mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
2034 mSmmMpSyncData->AllCpusInSync != NULL
2035 );
2036 *mSmmMpSyncData->Counter = 0;
2037 *mSmmMpSyncData->InsideSmm = FALSE;
2038 *mSmmMpSyncData->AllCpusInSync = FALSE;
2039
2040 mSmmMpSyncData->AllApArrivedWithException = FALSE;
2041
2042 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
2043 mSmmMpSyncData->CpuData[CpuIndex].Busy =
2044 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
2045 mSmmMpSyncData->CpuData[CpuIndex].Run =
2046 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
2047 mSmmMpSyncData->CpuData[CpuIndex].Present =
2048 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
2049 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
2050 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
2051 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
2052 }
2053 }
2054 }
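
//
// Worked example (illustrative numbers): if GetSpinLockProperties() yielded
// mSemaphoreSize == 64, then CpuData[2].Run points 128 bytes past
// SemaphoreCpu.Run -- every per-CPU counter sits in its own 64-byte cache
// line, so one CPU's updates never false-share with another's.
//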
2055
2056 /**
2057 Initialize global data for MP synchronization.
2058
2059 @param Stacks Base address of SMI stack buffer for all processors.
2060 @param StackSize Stack size for each processor in SMM.
2061 @param ShadowStackSize Shadow Stack size for each processor in SMM.
2062 @return The CR3 value of the SMM page table.
2063 **/
2064 UINT32
2065 InitializeMpServiceData (
2066 IN VOID *Stacks,
2067 IN UINTN StackSize,
2068 IN UINTN ShadowStackSize
2069 )
2070 {
2071 UINT32 Cr3;
2072 UINTN Index;
2073 UINT8 *GdtTssTables;
2074 UINTN GdtTableStepSize;
2075 CPUID_VERSION_INFO_EDX RegEdx;
2076 UINT32 MaxExtendedFunction;
2077 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
2078
2079 //
2080 // Determine if this CPU supports machine check
2081 //
2082 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
2083 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
2084
2085 //
2086 // Allocate memory for all locks and semaphores
2087 //
2088 InitializeSmmCpuSemaphores ();
2089
2090 //
2091 // Initialize mSmmMpSyncData
2092 //
2093 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
2094 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
2095 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
2096 ASSERT (mSmmMpSyncData != NULL);
2097 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
2098 InitializeMpSyncData ();
2099
2100 //
2101 // Initialize physical address mask
2102 // NOTE: Physical memory above the virtual address limit is not supported !!!
2103 //
2104 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
2105 if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
2106 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
2107 } else {
2108 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
2109 }
2110
2111 gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
2112 //
2113 // Clear the low 12 bits
2114 //
2115 gPhyMask &= 0xfffffffffffff000ULL;
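//
// Example (illustrative): PhysicalAddressBits == 36 gives
// gPhyMask == 0xFFFFFFFFF & 0xFFFFFFFFFFFFF000 == 0xFFFFFF000,
// i.e. a page-aligned mask of physical address bits [35:12].
//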
2116
2117 //
2118 // Create page tables
2119 //
2120 Cr3 = SmmInitPageTable ();
2121
2122 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
2123
2124 //
2125 // Install SMI handler for each CPU
2126 //
2127 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
2128 InstallSmiHandler (
2129 Index,
2130 (UINT32)mCpuHotPlugData.SmBase[Index],
2131 (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
2132 StackSize,
2133 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
2134 gcSmiGdtr.Limit + 1,
2135 gcSmiIdtr.Base,
2136 gcSmiIdtr.Limit + 1,
2137 Cr3
2138 );
2139 }
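
//
// Illustrative stack carving (hypothetical sizes): with StackSize == 16 KB
// and ShadowStackSize == 4 KB, CPU 2's stack region starts at
// Stacks + 2 * (16 KB + 4 KB) = Stacks + 40 KB, and its GDT/TSS copy at
// GdtTssTables + GdtTableStepSize * 2.
//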
2140
2141 //
2142 // Record current MTRR settings
2143 //
2144 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
2145 MtrrGetAllMtrrs (&gSmiMtrrs);
2146
2147 return Cr3;
2148 }
2149
2150 /**
2151
2152 Register the SMM Foundation entry point.
2153
2154 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2155 @param SmmEntryPoint SMM Foundation EntryPoint
2156
2157 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully.
2158
2159 **/
2160 EFI_STATUS
2161 EFIAPI
2162 RegisterSmmEntry (
2163 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
2164 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2165 )
2166 {
2167 //
2168 // Record the SMM Foundation entry point; it is invoked later from the SMI entry vector.
2169 //
2170 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
2171 return EFI_SUCCESS;
2172 }
2173
2174 /**
2175
2176 Register the startup procedure to be run by APs upon entering SMM.
2177
2178 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2179 of the system. Type EFI_AP_PROCEDURE is defined in Volume 2 of the
2180 PI Specification, with the related definitions of
2181 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2182 The caller may pass a value of NULL to deregister any existing
2183 startup procedure.
2184 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2185 run by the AP. It is an optional common mailbox between APs and
2186 the caller to share information.
2187
2188 @retval EFI_SUCCESS The Procedure has been set successfully.
2189 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments is not NULL.
2190 @retval EFI_NOT_READY The SMM MP synchronization data has not been initialized yet.
2191 **/
2192 EFI_STATUS
2193 RegisterStartupProcedure (
2194 IN EFI_AP_PROCEDURE Procedure,
2195 IN OUT VOID *ProcedureArguments OPTIONAL
2196 )
2197 {
2198 if ((Procedure == NULL) && (ProcedureArguments != NULL)) {
2199 return EFI_INVALID_PARAMETER;
2200 }
2201
2202 if (mSmmMpSyncData == NULL) {
2203 return EFI_NOT_READY;
2204 }
2205
2206 mSmmMpSyncData->StartupProcedure = Procedure;
2207 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2208
2209 return EFI_SUCCESS;
2210 }
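
//
// Illustrative usage (hypothetical caller, procedure and mailbox names):
//
//   STATIC VOID EFIAPI MyApStartup (IN OUT VOID *Buffer);
//
//   Status = RegisterStartupProcedure (MyApStartup, &mMyMailbox);  // register
//   Status = RegisterStartupProcedure (NULL, NULL);                // deregister
//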