//
// UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c (mirror_edk2)
//
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25 MM_COMPLETION mSmmStartupThisApToken;
26
27 //
28 // Processor specified by mPackageFirstThreadIndex[PackageIndex] will do the package-scope register check.
29 //
30 UINT32 *mPackageFirstThreadIndex = NULL;
31
32 extern UINTN mSmmShadowStackSize;
33
34 /**
35 Performs an atomic compare exchange operation to get semaphore.
36 The compare exchange operation must be performed using
37 MP safe mechanisms.
38
39 @param Sem IN: 32-bit unsigned integer
40 OUT: original integer - 1
41 @return Original integer - 1
42
43 **/
44 UINT32
45 WaitForSemaphore (
46 IN OUT volatile UINT32 *Sem
47 )
48 {
49 UINT32 Value;
50
51 for ( ; ;) {
52 Value = *Sem;
53 if ((Value != 0) &&
54 (InterlockedCompareExchange32 (
55 (UINT32 *)Sem,
56 Value,
57 Value - 1
58 ) == Value))
59 {
60 break;
61 }
62
63 CpuPause ();
64 }
65
66 return Value - 1;
67 }
68
69 /**
70 Performs an atomic compare exchange operation to release semaphore.
71 The compare exchange operation must be performed using
72 MP safe mechanisms.
73
74 @param Sem IN: 32-bit unsigned integer
75 OUT: original integer + 1
76 @return Original integer + 1
77
78 **/
79 UINT32
80 ReleaseSemaphore (
81 IN OUT volatile UINT32 *Sem
82 )
83 {
84 UINT32 Value;
85
86 do {
87 Value = *Sem;
88 } while (Value + 1 != 0 &&
89 InterlockedCompareExchange32 (
90 (UINT32 *)Sem,
91 Value,
92 Value + 1
93 ) != Value);
94
95 return Value + 1;
96 }
97
98 /**
99 Performs an atomic compare exchange operation to lock semaphore.
100 The compare exchange operation must be performed using
101 MP safe mechanisms.
102
103 @param Sem IN: 32-bit unsigned integer
104 OUT: -1
105 @return Original integer
106
107 **/
108 UINT32
109 LockdownSemaphore (
110 IN OUT volatile UINT32 *Sem
111 )
112 {
113 UINT32 Value;
114
115 do {
116 Value = *Sem;
117 } while (InterlockedCompareExchange32 (
118 (UINT32 *)Sem,
119 Value,
120 (UINT32)-1
121 ) != Value);
122
123 return Value;
124 }
125
126 /**
127 Wait all APs to performs an atomic compare exchange operation to release semaphore.
128
129 @param NumberOfAPs AP number
130
131 **/
132 VOID
133 WaitForAllAPs (
134 IN UINTN NumberOfAPs
135 )
136 {
137 UINTN BspIndex;
138
139 BspIndex = mSmmMpSyncData->BspIndex;
140 while (NumberOfAPs-- > 0) {
141 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
142 }
143 }
144
145 /**
146 Performs an atomic compare exchange operation to release semaphore
147 for each AP.
148
149 **/
150 VOID
151 ReleaseAllAPs (
152 VOID
153 )
154 {
155 UINTN Index;
156
157 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
158 if (IsPresentAp (Index)) {
159 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
160 }
161 }
162 }
163
164 /**
165 Check whether the index of CPU perform the package level register
166 programming during System Management Mode initialization.
167
168 The index of Processor specified by mPackageFirstThreadIndex[PackageIndex]
169 will do the package-scope register programming.
170
171 @param[in] CpuIndex Processor Index.
172
173 @retval TRUE Perform the package level register programming.
174 @retval FALSE Don't perform the package level register programming.
175
176 **/
177 BOOLEAN
178 IsPackageFirstThread (
179 IN UINTN CpuIndex
180 )
181 {
182 UINT32 PackageIndex;
183
184 PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;
185
186 ASSERT (mPackageFirstThreadIndex != NULL);
187
188 //
189 // Set the value of mPackageFirstThreadIndex[PackageIndex].
190 // The package-scope register are checked by the first processor (CpuIndex) in Package.
191 //
192 // If mPackageFirstThreadIndex[PackageIndex] equals to (UINT32)-1, then update
193 // to current CpuIndex. If it doesn't equal to (UINT32)-1, don't change it.
194 //
195 if (mPackageFirstThreadIndex[PackageIndex] == (UINT32)-1) {
196 mPackageFirstThreadIndex[PackageIndex] = (UINT32)CpuIndex;
197 }
198
199 return (BOOLEAN)(mPackageFirstThreadIndex[PackageIndex] == CpuIndex);
200 }
201
202 /**
203 Returns the Number of SMM Delayed & Blocked & Disabled Thread Count.
204
205 @param[in,out] DelayedCount The Number of SMM Delayed Thread Count.
206 @param[in,out] BlockedCount The Number of SMM Blocked Thread Count.
207 @param[in,out] DisabledCount The Number of SMM Disabled Thread Count.
208
209 **/
210 VOID
211 GetSmmDelayedBlockedDisabledCount (
212 IN OUT UINT32 *DelayedCount,
213 IN OUT UINT32 *BlockedCount,
214 IN OUT UINT32 *DisabledCount
215 )
216 {
217 UINTN Index;
218
219 for (Index = 0; Index < mNumberOfCpus; Index++) {
220 if (IsPackageFirstThread (Index)) {
221 if (DelayedCount != NULL) {
222 *DelayedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);
223 }
224
225 if (BlockedCount != NULL) {
226 *BlockedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);
227 }
228
229 if (DisabledCount != NULL) {
230 *DisabledCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);
231 }
232 }
233 }
234 }
235
/**
  Checks if all CPUs (except Blocked & Disabled) have checked in for this SMI run

  @retval TRUE  if all CPUs the have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmExceptBlockedDisabled (
  VOID
  )
{
  UINT32  BlockedCount;
  UINT32  DisabledCount;

  BlockedCount  = 0;
  DisabledCount = 0;

  //
  // Check to make sure mSmmMpSyncData->Counter is valid and not locked.
  //
  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Fast path: every CPU has already incremented the arrival counter.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  //
  // Slow path: count CPUs that legitimately cannot arrive (Blocked/Disabled).
  //
  GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);

  //
  // *mSmmMpSyncData->Counter may be incremented concurrently by APs that enter
  // SMI after the Blocked/Disabled counts were sampled above; in that case
  // Counter + BlockedCount + DisabledCount can exceed mNumberOfCpus. Since
  // BlockedCount and DisabledCount are local snapshots, ">=" (rather than "==")
  // is the correct check for "all CPUs accounted for".
  //
  if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
    return TRUE;
  }

  return FALSE;
}
285
286 /**
287 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
288
289 @retval TRUE Os enable lmce.
290 @retval FALSE Os not enable lmce.
291
292 **/
293 BOOLEAN
294 IsLmceOsEnabled (
295 VOID
296 )
297 {
298 MSR_IA32_MCG_CAP_REGISTER McgCap;
299 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
300 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
301
302 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
303 if (McgCap.Bits.MCG_LMCE_P == 0) {
304 return FALSE;
305 }
306
307 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
308 if (FeatureCtrl.Bits.LmceOn == 0) {
309 return FALSE;
310 }
311
312 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
313 return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
314 }
315
316 /**
317 Return if Local machine check exception signaled.
318
319 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
320 delivered to only the logical processor.
321
322 @retval TRUE LMCE was signaled.
323 @retval FALSE LMCE was not signaled.
324
325 **/
326 BOOLEAN
327 IsLmceSignaled (
328 VOID
329 )
330 {
331 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
332
333 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
334 return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
335 }
336
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;
  UINT32   DelayedCount;
  UINT32   BlockedCount;

  DelayedCount = 0;
  BlockedCount = 0;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // LMCE checks are only meaningful when machine check is supported; if an
  // LMCE is pending on this processor, the first wait loop below is cut short.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
       )
  {
    mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
    if (mSmmMpSyncData->AllApArrivedWithException) {
      break;
    }

    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in; slots with INVALID_APIC_ID
    // (e.g. hot-removed processors) are skipped.
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer);
         )
    {
      mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
      if (mSmmMpSyncData->AllApArrivedWithException) {
        break;
      }

      CpuPause ();
    }
  }

  if (!mSmmMpSyncData->AllApArrivedWithException) {
    //
    // Some APs never made it: report how many were Delayed or Blocked for diagnosis.
    //
    GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);
    DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));
  }

  return;
}
444
/**
  Replace OS MTRR's with SMI MTRR's.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  //
  // SMRR must be disabled before the MTRRs are overwritten with the SMI set.
  //
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers with the values captured in gSmiMtrrs.
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}
463
/**
  Check whether task has been finished by all APs.

  @param       BlockMode   Whether did it in block mode or non-block mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which not call in SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      //
      // Block mode: wait until each AP's Busy lock can be taken, i.e. its
      // scheduled procedure has finished, then immediately release it.
      //
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      //
      // Non-block mode: probe the lock once; if any AP is still busy, bail
      // out so the caller can retry later.
      //
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}
502
/**
  Check whether it is an present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  //
  // An AP is "present" when it is not the currently executing (BSP) CPU and
  // its Present flag was set on SMI entry.
  //
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}
520
/**
  Clean up the status flags used during executing the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  //
  // The last AP to finish the procedure releases the token's spin lock,
  // which is what a waiter in WaitForProcedure/IsApReady observes.
  //
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
542
/**
  Free the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  // The token buffers themselves are kept allocated and reused on the next SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}
557
/**
  SMI handler for BSP.

  Orchestrates the whole SMI run: gathers APs (depending on sync mode),
  optionally coordinates MTRR save/program/restore with them, runs the SMM
  Foundation entry point, and finally synchronizes the exit of all CPUs.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    // (Counter includes the BSP itself, hence the "- 1").
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    // (PresentCount > ApCount means the ApCount APs plus the BSP are present).
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter                  = 0;
  *mSmmMpSyncData->AllCpusInSync            = FALSE;
  mSmmMpSyncData->AllApArrivedWithException = FALSE;
}
796
/**
  SMI handler for AP.

  Mirrors BSPHandler from the AP side: waits for the BSP to enter SMM
  (sending it an SMI IPI if needed), participates in the MTRR save/program/
  restore handshakes when required, then services procedures dispatched via
  its Run semaphore until the BSP signals SMI exit.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       )
  {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      // Existing AP is in SMI now but BSP not in, so, try bring BSP in SMM.
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           )
      {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        // Reduce the mSmmMpSyncData->Counter!
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      // Reduce the mSmmMpSyncData->Counter!
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  //
  // Procedure-dispatch loop: run scheduled procedures until the BSP clears
  // InsideSmm to request SMI exit.
  //
  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
  (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
  );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}
1003
/**
  Create 4G PageTable in SMRAM.

  Builds an identity-mapped page table covering the first 4 GiB using 2 MiB
  pages, then (when PcdCpuSmmStackGuard is set) splits the 2 MiB regions that
  contain the SMM stacks into 4 KiB pages so each stack's guard page can be
  marked non-present. Optionally hides page 0 for NULL-pointer detection.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded    = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }

  //
  // Allocate the page table: 1 PDPTE page + 4 PD pages + one 4K page table
  // per 2MB region that must be split for stack guard pages.
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  // NOTE(review): the cast below is a no-op round-trip; presumably retained
  // from an older truncation step — confirm before removing.
  PageTable = (VOID *)((UINTN)PageTable);
  Pte       = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }

  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries: identity-map 0..4GiB with 2MiB pages.
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte                                             = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present (IA32_PG_P deliberately omitted)
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        PageAddress += EFI_PAGE_SIZE;
      }

      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte    = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0]      = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index]   = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
1133
1134 /**
1135 Checks whether the input token is the current used token.
1136
1137 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1138 BroadcastProcedure.
1139
1140 @retval TRUE The input token is the current used token.
1141 @retval FALSE The input token is not the current used token.
1142 **/
1143 BOOLEAN
1144 IsTokenInUse (
1145 IN SPIN_LOCK *Token
1146 )
1147 {
1148 LIST_ENTRY *Link;
1149 PROCEDURE_TOKEN *ProcToken;
1150
1151 if (Token == NULL) {
1152 return FALSE;
1153 }
1154
1155 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1156 //
1157 // Only search used tokens.
1158 //
1159 while (Link != gSmmCpuPrivate->FirstFreeToken) {
1160 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1161
1162 if (ProcToken->SpinLock == Token) {
1163 return TRUE;
1164 }
1165
1166 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1167 }
1168
1169 return FALSE;
1170 }
1171
1172 /**
1173 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1174
1175 @return First token of the token buffer.
1176 **/
1177 LIST_ENTRY *
1178 AllocateTokenBuffer (
1179 VOID
1180 )
1181 {
1182 UINTN SpinLockSize;
1183 UINT32 TokenCountPerChunk;
1184 UINTN Index;
1185 SPIN_LOCK *SpinLock;
1186 UINT8 *SpinLockBuffer;
1187 PROCEDURE_TOKEN *ProcTokens;
1188
1189 SpinLockSize = GetSpinLockProperties ();
1190
1191 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1192 ASSERT (TokenCountPerChunk != 0);
1193 if (TokenCountPerChunk == 0) {
1194 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1195 CpuDeadLoop ();
1196 }
1197
1198 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
1199
1200 //
1201 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1202 //
1203 SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
1204 ASSERT (SpinLockBuffer != NULL);
1205
1206 ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
1207 ASSERT (ProcTokens != NULL);
1208
1209 for (Index = 0; Index < TokenCountPerChunk; Index++) {
1210 SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
1211 InitializeSpinLock (SpinLock);
1212
1213 ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
1214 ProcTokens[Index].SpinLock = SpinLock;
1215 ProcTokens[Index].RunningApCount = 0;
1216
1217 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
1218 }
1219
1220 return &ProcTokens[0].Link;
1221 }
1222
1223 /**
1224 Get the free token.
1225
1226 If no free token, allocate new tokens then return the free one.
1227
1228 @param RunningApsCount The Running Aps count for this token.
1229
1230 @retval return the first free PROCEDURE_TOKEN.
1231
1232 **/
1233 PROCEDURE_TOKEN *
1234 GetFreeToken (
1235 IN UINT32 RunningApsCount
1236 )
1237 {
1238 PROCEDURE_TOKEN *NewToken;
1239
1240 //
1241 // If FirstFreeToken meets the end of token list, enlarge the token list.
1242 // Set FirstFreeToken to the first free token.
1243 //
1244 if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
1245 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1246 }
1247
1248 NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
1249 gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);
1250
1251 NewToken->RunningApCount = RunningApsCount;
1252 AcquireSpinLock (NewToken->SpinLock);
1253
1254 return NewToken;
1255 }
1256
1257 /**
1258 Checks status of specified AP.
1259
1260 This function checks whether the specified AP has finished the task assigned
1261 by StartupThisAP(), and whether timeout expires.
1262
1263 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1264 BroadcastProcedure.
1265
1266 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1267 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1268 **/
1269 EFI_STATUS
1270 IsApReady (
1271 IN SPIN_LOCK *Token
1272 )
1273 {
1274 if (AcquireSpinLockOrFail (Token)) {
1275 ReleaseSpinLock (Token);
1276 return EFI_SUCCESS;
1277 }
1278
1279 return EFI_NOT_READY;
1280 }
1281
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure                The address of the procedure to run
  @param[in]     CpuIndex                 Target CPU Index
  @param[in,out] ProcArguments            The parameter to pass to the procedure
  @param[in]     Token                    This is an optional parameter that allows the caller to execute the
                                          procedure in a blocking or non-blocking fashion. If it is NULL the
                                          call is blocking, and the call will not return until the AP has
                                          completed the procedure. If the token is not NULL, the call will
                                          return immediately. The caller can check whether the procedure has
                                          completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                          execution of Procedure, either for blocking or non-blocking mode.
                                          Zero means infinity. If the timeout expires before all APs return
                                          from Procedure, then Procedure on the failed APs is terminated. If
                                          the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                          If the timeout expires in non-blocking mode, the timeout determined
                                          can be through CheckOnProcedure or WaitForProcedure.
                                          Note that timeout support is optional. Whether an implementation
                                          supports this feature can be determined via the Attributes data
                                          member.
  @param[in,out] CpuStatus                This optional pointer may be used to get the status code returned
                                          by Procedure when it completes execution on the target AP, or with
                                          EFI_TIMEOUT if the Procedure fails to complete within the optional
                                          timeout. The implementation will update this variable with
                                          EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  //
  // Parameter validation: index must be in range, must not name the current
  // (BSP) CPU, and the target AP must be valid and present in this SMI.
  //
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    //
    // Only log in traditional sync mode; with relaxed AP mode an absent AP is
    // an expected condition, not an error worth reporting.
    //
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  //
  // Non-zero timeout is only legal when the MP protocol advertises timeout
  // support in its Attributes.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Take the target AP's Busy lock; it is released by the AP when the
  // procedure completes (or below for the blocking case).
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token for such case so the 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  //
  // Pre-set the caller-visible status to EFI_NOT_READY before waking the AP,
  // per the EFI_MM_MP contract.
  //
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  //
  // Wake the AP: it is blocked on its Run semaphore in APHandler().
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    //
    // Blocking mode: the AP releases Busy when the procedure finishes, so
    // acquiring it again here waits for completion; then release to leave the
    // AP available for the next dispatch.
    //
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
1408
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  //
  // Non-zero timeout is only legal when the MP protocol advertises timeout
  // support in its Attributes.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // First pass: count the present APs and probe each one's Busy lock
  // (acquire-then-release) so the dispatch below can fail early with
  // EFI_NOT_READY instead of blocking on a busy AP.
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // Non-blocking mode: hand the caller a completion token sized for the
  // worst case (all CPUs); excluded CPUs are decremented off below.
  //
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Second pass: with every present AP's Busy lock held, publish the
  // procedure, argument, token and status slot for each AP.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  //
  // Wake every AP (each is blocked on its Run semaphore in APHandler()).
  //
  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1544
1545 /**
1546 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1547 If the function is defined with a type that is not compatible with
1548 the type (of the expression) pointed to by the expression that
1549 denotes the called function, the behavior is undefined.
1550
1551 So add below wrapper function to convert between EFI_AP_PROCEDURE
1552 and EFI_AP_PROCEDURE2.
1553
1554 Wrapper for Procedures.
1555
1556 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1557
1558 **/
1559 EFI_STATUS
1560 EFIAPI
1561 ProcedureWrapper (
1562 IN VOID *Buffer
1563 )
1564 {
1565 PROCEDURE_WRAPPER *Wrapper;
1566
1567 Wrapper = Buffer;
1568 Wrapper->Procedure (Wrapper->ProcedureArgument);
1569
1570 return EFI_SUCCESS;
1571 }
1572
1573 /**
1574 Schedule a procedure to run on the specified CPU in blocking mode.
1575
1576 @param[in] Procedure The address of the procedure to run
1577 @param[in] CpuIndex Target CPU Index
1578 @param[in, out] ProcArguments The parameter to pass to the procedure
1579
1580 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1581 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1582 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1583 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1584 @retval EFI_SUCCESS The procedure has been successfully scheduled
1585
1586 **/
1587 EFI_STATUS
1588 EFIAPI
1589 SmmBlockingStartupThisAp (
1590 IN EFI_AP_PROCEDURE Procedure,
1591 IN UINTN CpuIndex,
1592 IN OUT VOID *ProcArguments OPTIONAL
1593 )
1594 {
1595 PROCEDURE_WRAPPER Wrapper;
1596
1597 Wrapper.Procedure = Procedure;
1598 Wrapper.ProcedureArgument = ProcArguments;
1599
1600 //
1601 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1602 //
1603 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1604 }
1605
1606 /**
1607 Schedule a procedure to run on the specified CPU.
1608
1609 @param Procedure The address of the procedure to run
1610 @param CpuIndex Target CPU Index
1611 @param ProcArguments The parameter to pass to the procedure
1612
1613 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1614 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1615 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1616 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1617 @retval EFI_SUCCESS The procedure has been successfully scheduled
1618
1619 **/
1620 EFI_STATUS
1621 EFIAPI
1622 SmmStartupThisAp (
1623 IN EFI_AP_PROCEDURE Procedure,
1624 IN UINTN CpuIndex,
1625 IN OUT VOID *ProcArguments OPTIONAL
1626 )
1627 {
1628 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1629 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1630
1631 //
1632 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1633 //
1634 return InternalSmmStartupThisAp (
1635 ProcedureWrapper,
1636 CpuIndex,
1637 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1638 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
1639 0,
1640 NULL
1641 );
1642 }
1643
1644 /**
1645 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1646 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1647
1648 NOTE: It might not be appreciated in runtime since it might
1649 conflict with OS debugging facilities. Turn them off in RELEASE.
1650
1651 @param CpuIndex CPU Index
1652
1653 **/
1654 VOID
1655 EFIAPI
1656 CpuSmmDebugEntry (
1657 IN UINTN CpuIndex
1658 )
1659 {
1660 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1661
1662 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1663 ASSERT (CpuIndex < mMaxNumberOfCpus);
1664 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1665 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1666 AsmWriteDr6 (CpuSaveState->x86._DR6);
1667 AsmWriteDr7 (CpuSaveState->x86._DR7);
1668 } else {
1669 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1670 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1671 }
1672 }
1673 }
1674
1675 /**
1676 This function restores DR6 & DR7 to SMM save state.
1677
1678 NOTE: It might not be appreciated in runtime since it might
1679 conflict with OS debugging facilities. Turn them off in RELEASE.
1680
1681 @param CpuIndex CPU Index
1682
1683 **/
1684 VOID
1685 EFIAPI
1686 CpuSmmDebugExit (
1687 IN UINTN CpuIndex
1688 )
1689 {
1690 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1691
1692 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1693 ASSERT (CpuIndex < mMaxNumberOfCpus);
1694 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1695 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1696 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1697 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1698 } else {
1699 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1700 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1701 }
1702 }
1703 }
1704
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  if (mSmmRelocated) {
    ASSERT (mSmmInitialized != NULL);
  }

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // With relocated SMBASE flow, the very first SMI on each CPU only performs
  // the one-time SmmInitHandler() and returns without joining the rendezvous.
  //
  if (mSmmRelocated && !mSmmInitialized[CpuIndex]) {
    //
    // Perform SmmInitHandler for CpuIndex
    //
    SmmInitHandler ();

    //
    // Restore Cr2
    //
    RestoreCr2 (Cr2);

    //
    // Mark the first SMI init for CpuIndex has been done so as to avoid the reentry.
    //
    mSmmInitialized[CpuIndex] = TRUE;

    return;
  }

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    // mSmmMpSyncData->Counter is increased here!
    // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means BSP has already ended the synchronization.
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      // Existing AP is too late now to enter SMI since BSP has already ended the synchronization!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method:
            // first CPU to swap BspIndex from -1 wins.
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    //
    // On return from the handler this CPU must not have any pending wake-ups.
    //
    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1908
1909 /**
1910 Initialize PackageBsp Info. Processor specified by mPackageFirstThreadIndex[PackageIndex]
1911 will do the package-scope register programming. Set default CpuIndex to (UINT32)-1, which
1912 means not specified yet.
1913
1914 **/
1915 VOID
1916 InitPackageFirstThreadIndexInfo (
1917 VOID
1918 )
1919 {
1920 UINT32 Index;
1921 UINT32 PackageId;
1922 UINT32 PackageCount;
1923
1924 PackageId = 0;
1925 PackageCount = 0;
1926
1927 //
1928 // Count the number of package, set to max PackageId + 1
1929 //
1930 for (Index = 0; Index < mNumberOfCpus; Index++) {
1931 if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {
1932 PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;
1933 }
1934 }
1935
1936 PackageCount = PackageId + 1;
1937
1938 mPackageFirstThreadIndex = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);
1939 ASSERT (mPackageFirstThreadIndex != NULL);
1940 if (mPackageFirstThreadIndex == NULL) {
1941 return;
1942 }
1943
1944 //
1945 // Set default CpuIndex to (UINT32)-1, which means not specified yet.
1946 //
1947 SetMem32 (mPackageFirstThreadIndex, sizeof (UINT32) * PackageCount, (UINT32)-1);
1948 }
1949
1950 /**
1951 Allocate buffer for SpinLock and Wrapper function buffer.
1952
1953 **/
1954 VOID
1955 InitializeDataForMmMp (
1956 VOID
1957 )
1958 {
1959 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1960 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1961
1962 InitializeListHead (&gSmmCpuPrivate->TokenList);
1963
1964 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1965 }
1966
1967 /**
1968 Allocate buffer for all semaphores and spin locks.
1969
1970 **/
1971 VOID
1972 InitializeSmmCpuSemaphores (
1973 VOID
1974 )
1975 {
1976 UINTN ProcessorCount;
1977 UINTN TotalSize;
1978 UINTN GlobalSemaphoresSize;
1979 UINTN CpuSemaphoresSize;
1980 UINTN SemaphoreSize;
1981 UINTN Pages;
1982 UINTN *SemaphoreBlock;
1983 UINTN SemaphoreAddr;
1984
1985 SemaphoreSize = GetSpinLockProperties ();
1986 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1987 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1988 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1989 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1990 DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1991 DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1992 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1993 SemaphoreBlock = AllocatePages (Pages);
1994 ASSERT (SemaphoreBlock != NULL);
1995 ZeroMem (SemaphoreBlock, TotalSize);
1996
1997 SemaphoreAddr = (UINTN)SemaphoreBlock;
1998 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1999 SemaphoreAddr += SemaphoreSize;
2000 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
2001 SemaphoreAddr += SemaphoreSize;
2002 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
2003 SemaphoreAddr += SemaphoreSize;
2004 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
2005 SemaphoreAddr += SemaphoreSize;
2006 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
2007 = (SPIN_LOCK *)SemaphoreAddr;
2008 SemaphoreAddr += SemaphoreSize;
2009
2010 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
2011 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
2012 SemaphoreAddr += ProcessorCount * SemaphoreSize;
2013 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
2014 SemaphoreAddr += ProcessorCount * SemaphoreSize;
2015 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
2016
2017 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
2018 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
2019
2020 mSemaphoreSize = SemaphoreSize;
2021 }
2022
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    //
    // CpuData and CandidateBsp are laid out immediately after the fixed-size
    // header inside the same allocation.
    //
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Wire the global synchronization primitives to the slots carved out by
    // InitializeSmmCpuSemaphores(), then reset them to their idle state.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    mSmmMpSyncData->AllApArrivedWithException = FALSE;

    //
    // Each CPU gets its own Busy lock, Run semaphore and Present flag slot,
    // spaced mSemaphoreSize apart within the per-CPU semaphore runs.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
2078
/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value (page table base) installed for the SMI handlers.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  // CPUID leaf 0x80000008 reports the physical address width; if the leaf is
  // not available, fall back to the 36-bit legacy limit.
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  // Each CPU receives its own SMBASE, stack slice (stack plus optional shadow
  // stack), and GDT/TSS copy; all share the same IDT and CR3.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
2172
2173 /**
2174
2175 Register the SMM Foundation entry point.
2176
2177 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2178 @param SmmEntryPoint SMM Foundation EntryPoint
2179
2180 @retval EFI_SUCCESS Successfully to register SMM foundation entry point
2181
2182 **/
2183 EFI_STATUS
2184 EFIAPI
2185 RegisterSmmEntry (
2186 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
2187 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2188 )
2189 {
2190 //
2191 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2192 //
2193 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
2194 return EFI_SUCCESS;
2195 }
2196
2197 /**
2198
2199 Register the SMM Foundation entry point.
2200
2201 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2202 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2203 with the related definitions of
2204 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2205 If caller may pass a value of NULL to deregister any existing
2206 startup procedure.
2207 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2208 run by the AP. It is an optional common mailbox between APs and
2209 the caller to share information
2210
2211 @retval EFI_SUCCESS The Procedure has been set successfully.
2212 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2213
2214 **/
2215 EFI_STATUS
2216 RegisterStartupProcedure (
2217 IN EFI_AP_PROCEDURE Procedure,
2218 IN OUT VOID *ProcedureArguments OPTIONAL
2219 )
2220 {
2221 if ((Procedure == NULL) && (ProcedureArguments != NULL)) {
2222 return EFI_INVALID_PARAMETER;
2223 }
2224
2225 if (mSmmMpSyncData == NULL) {
2226 return EFI_NOT_READY;
2227 }
2228
2229 mSmmMpSyncData->StartupProcedure = Procedure;
2230 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2231
2232 return EFI_SUCCESS;
2233 }