/** @file
  SMM MP service implementation

  Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;
MM_COMPLETION                mSmmStartupThisApToken;

extern UINTN  mSmmShadowStackSize;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

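  //
  // Busy-wait until the semaphore is non-zero, then decrement it. A caller
  // that observes *Sem == 0 spins (with CpuPause) until another processor
  // releases the semaphore, so this behaves as the "P" operation of a
  // counting semaphore.
  //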
  for ( ; ;) {
    Value = *Sem;
    if ((Value != 0) &&
        (InterlockedCompareExchange32 (
           (UINT32 *)Sem,
           Value,
           Value - 1
           ) == Value))
    {
      break;
    }

    CpuPause ();
  }

  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);

  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

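  //
  // Locking writes (UINT32)-1 into the semaphore. Together with
  // ReleaseSemaphore() above, which refuses to increment when Value + 1 == 0,
  // this means a processor that checks in after lockdown sees
  // ReleaseSemaphore() return 0 and knows the BSP has already ended the
  // synchronization phase (see SmiRendezvous()).
  //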
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);

  return Value;
}

/**
  Wait for all APs to perform an atomic compare exchange operation to release
  the semaphore.

  @param NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

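  //
  // Each present AP signals the BSP by releasing the BSP's own Run semaphore,
  // so waiting on that single semaphore NumberOfAPs times rendezvouses with
  // every AP being waited for.
  //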
  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions     CPU Arrival exception flags.

  @retval TRUE  if all CPUs have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {
        continue;
      }

      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {
        continue;
      }

      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {
        continue;
      }

      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  Indicates (when set) that a local machine check exception was generated,
  i.e. the current machine-check event was delivered to only this logical
  processor.

  @retval TRUE LMCE was signaled.
  @retval FALSE LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering SMM,
  except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, any CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
       )
  {
    mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
    if (mSmmMpSyncData->AllApArrivedWithException) {
      break;
    }

    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer);
         )
    {
      mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
      if (mSmmMpSyncData->AllApArrivedWithException) {
        break;
      }

      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex  Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode   Whether to check in blocking mode; if FALSE, check in
                     non-blocking mode.

  @retval TRUE       Task has been finished by all APs.
  @retval FALSE      Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which have not called in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether the given processor is a present AP.

  @param CpuIndex      The index of the processor to check.

  @retval TRUE   It's a present AP.
  @retval FALSE  This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during executing the procedure.

  @param CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

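  //
  // RunningApCount was initialized in GetFreeToken() to the number of APs
  // given this token. The last AP to finish releases the token's spinlock,
  // which is what IsApReady() polls to detect completion.
  //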
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Free the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
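    //
    // Counter includes the BSP itself, so ApCount excludes it. After the
    // lockdown, a late-arriving AP sees ReleaseSemaphore (Counter) return 0
    // in SmiRendezvous() and runs through without joining this SMI.
    //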

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter       = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       )
  {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           )
      {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
                                                                     (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                                                                     );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded    = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }

  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

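  //
  // Layout of the 5 base pages: page 0 holds the 4 PDPTEs, pages 1-4 are the
  // page directories covering 0-4GiB with 2MB entries. The extra PagesNeeded
  // pages become page tables used to split the 2MB regions containing the
  // per-CPU stack guard pages into 4KB mappings.
  //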
  PageTable = (VOID *)((UINTN)PageTable);
  Pte       = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }

  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

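  //
  // Pte now points at the first page directory. The loop below fills all
  // 4 * 512 = 2048 directory entries as 2MB large pages (IA32_PG_PS),
  // identity-mapping the full 0-4GiB range.
  //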
  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte                                             = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        PageAddress += EFI_PAGE_SIZE;
      }

      Pages += EFI_PAGE_SIZE;
    }
  }

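  //
  // When BIT1 of PcdNullPointerDetectionPropertyMask is set, page 0 is
  // unmapped below so that NULL pointer dereferences inside SMM trigger a
  // page fault instead of silently accessing the first page.
  //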
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte     = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0]      = PageAddress | mAddressEncMask; // Hide page 0, leave the rest present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index]   = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Checks whether the input token is the currently used token.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is the currently used token.
  @retval FALSE          The input token is not the currently used token.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }

  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Keep the SPIN_LOCKs in a buffer separate from the PROCEDURE_TOKENs
  // because of the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate new tokens and then return the first
  free one.

  @param RunningApsCount    The count of running APs for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }

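  //
  // The token's spinlock is acquired here, at allocation time, and is only
  // released by the last AP in ReleaseToken(). IsApReady() can therefore
  // report completion by simply try-acquiring the lock.
  //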
  NewToken                       = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token             This parameter describes the token that was passed into DispatchProcedure or
                                BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAP().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token in such a case, so these 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

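  //
  // The Busy lock acquired above is held while the AP runs the procedure and
  // is released by the AP in APHandler(). In blocking mode (Token == NULL),
  // re-acquiring Busy below therefore waits for the AP to finish.
  //
  // Illustrative (hypothetical) non-blocking usage, assuming a caller-defined
  // MyProc of type EFI_AP_PROCEDURE2 and a valid AP index Index:
  //
  //   MM_COMPLETION  Token;
  //   InternalSmmStartupThisAp (MyProc, Index, NULL, &Token, 0, NULL);
  //   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
  //     CpuPause ();
  //   }
  //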
  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the loop above has already checked mSmmMpSyncData->CpuData[***].Busy
  // for each present AP, the code here always uses AcquireSpinLock instead of
  // AcquireSpinLockOrFail, i.e., it acquires in blocking mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add the wrapper function below to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer        Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure         = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  This is useful when you want hardware breakpoints to be active inside SMM without having
  to enter SMM to set them.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when using on-demand paging for above-4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled the ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
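    // ReleaseSemaphore() below returns 0 only when the Counter was locked
    // down to (UINT32)-1 by the BSP (see LockdownSemaphore()), meaning this
    // processor arrived after the BSP closed the rendezvous window.
    //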
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determined the BSP successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine the BSP, use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for the SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr                                   = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;

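  //
  // Per-CPU semaphores follow the global ones in the same block. Each of
  // Busy/Run/Present is laid out as an array with one SemaphoreSize-aligned
  // slot per processor; SemaphoreSize comes from GetSpinLockProperties(),
  // typically the cache-line size, so semaphores do not share cache lines.
  //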
  SemaphoreAddr                          = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    mSmmMpSyncData->AllApArrivedWithException = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was registered successfully

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure that is called near the beginning of
  SmiRendezvous() on each processor.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                       with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information.

  @retval EFI_SUCCESS             The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER   The Procedure is NULL but ProcedureArguments is not NULL.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if ((Procedure == NULL) && (ProcedureArguments != NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}