UefiCpuPkg/PiSmmCpuDxeSmm: Avoid allocate Token every time
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25
26 /**
27 Performs an atomic compare exchange operation to get semaphore.
28 The compare exchange operation must be performed using
29 MP safe mechanisms.
30
31 @param Sem IN: 32-bit unsigned integer
32 OUT: original integer - 1
33 @return Original integer - 1
34
35 **/
36 UINT32
37 WaitForSemaphore (
38 IN OUT volatile UINT32 *Sem
39 )
40 {
41 UINT32 Value;
42
43 do {
44 Value = *Sem;
45 } while (Value == 0 ||
46 InterlockedCompareExchange32 (
47 (UINT32*)Sem,
48 Value,
49 Value - 1
50 ) != Value);
51 return Value - 1;
52 }
53
54
55 /**
56 Performs an atomic compare exchange operation to release semaphore.
57 The compare exchange operation must be performed using
58 MP safe mechanisms.
59
60 @param Sem IN: 32-bit unsigned integer
61 OUT: original integer + 1
62 @return Original integer + 1
63
64 **/
65 UINT32
66 ReleaseSemaphore (
67 IN OUT volatile UINT32 *Sem
68 )
69 {
70 UINT32 Value;
71
72 do {
73 Value = *Sem;
74 } while (Value + 1 != 0 &&
75 InterlockedCompareExchange32 (
76 (UINT32*)Sem,
77 Value,
78 Value + 1
79 ) != Value);
80 return Value + 1;
81 }
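//
// Editor's note: an illustrative sketch (not part of the driver) of how the
// two routines above pair up as a counting semaphore. "Count" is a
// hypothetical local; in this file the real counters live in mSmmMpSyncData
// (the global Counter and the per-CPU Run counts).
//
//   volatile UINT32 Count = 0;     // semaphore starts empty
//   ReleaseSemaphore (&Count);     // post:    Count 0 -> 1, returns 1
//   WaitForSemaphore (&Count);     // acquire: spins while 0, then 1 -> 0
//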
82
83 /**
84 Performs an atomic compare exchange operation to lock semaphore.
85 The compare exchange operation must be performed using
86 MP safe mechanisms.
87
88 @param Sem IN: 32-bit unsigned integer
89 OUT: -1
90 @return Original integer
91
92 **/
93 UINT32
94 LockdownSemaphore (
95 IN OUT volatile UINT32 *Sem
96 )
97 {
98 UINT32 Value;
99
100 do {
101 Value = *Sem;
102 } while (InterlockedCompareExchange32 (
103 (UINT32*)Sem,
104 Value, (UINT32)-1
105 ) != Value);
106 return Value;
107 }
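//
// Editor's note: a sketch of how the three primitives above cooperate on the
// arrival counter. After the BSP locks the counter at (UINT32)-1, a late
// CPU's ReleaseSemaphore() fails its "Value + 1 != 0" guard and returns 0
// without touching the counter; SmiRendezvous() uses that 0 to detect that
// the rendezvous window has closed.
//
//   *Counter = 0;                  // BSP: open for check-in
//   ReleaseSemaphore (Counter);    // each arriving CPU: returns 1, 2, ...
//   LockdownSemaphore (Counter);   // BSP: freeze counter at (UINT32)-1
//   ReleaseSemaphore (Counter);    // late CPU: returns 0 -> must quit
//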
108
109 /**
110 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
111
112 @param NumberOfAPs AP number
113
114 **/
115 VOID
116 WaitForAllAPs (
117 IN UINTN NumberOfAPs
118 )
119 {
120 UINTN BspIndex;
121
122 BspIndex = mSmmMpSyncData->BspIndex;
123 while (NumberOfAPs-- > 0) {
124 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
125 }
126 }
127
128 /**
129 Performs an atomic compare exchange operation to release semaphore
130 for each AP.
131
132 **/
133 VOID
134 ReleaseAllAPs (
135 VOID
136 )
137 {
138 UINTN Index;
139
140 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
141 if (IsPresentAp (Index)) {
142 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
143 }
144 }
145 }
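//
// Editor's note: WaitForAllAPs() and ReleaseAllAPs() form the two directions
// of the BSP<->AP barrier used throughout BSPHandler()/APHandler(). A
// condensed sketch of one synchronization step:
//
//   BSP: ReleaseAllAPs ();                            // bump every AP's Run
//        WaitForAllAPs (ApCount);                     // collect ApCount acks
//   AP:  WaitForSemaphore (CpuData[CpuIndex].Run);    // wait for BSP signal
//        /* ... perform the step ... */
//        ReleaseSemaphore (CpuData[BspIndex].Run);    // ack to the BSP
//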
146
147 /**
148 Checks if all CPUs (with certain exceptions) have checked in for this SMI run.
149
150 @param Exceptions CPU Arrival exception flags.
151
152 @retval TRUE if all CPUs have checked in.
153 @retval FALSE if at least one Normal AP hasn't checked in.
154
155 **/
156 BOOLEAN
157 AllCpusInSmmWithExceptions (
158 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
159 )
160 {
161 UINTN Index;
162 SMM_CPU_DATA_BLOCK *CpuData;
163 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
164
165 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
166
167 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
168 return TRUE;
169 }
170
171 CpuData = mSmmMpSyncData->CpuData;
172 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
173 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
174 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
175 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
176 continue;
177 }
178 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
179 continue;
180 }
181 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
182 continue;
183 }
184 return FALSE;
185 }
186 }
187
188
189 return TRUE;
190 }
191
192 /**
193 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
194
195 @retval TRUE The OS has enabled LMCE.
196 @retval FALSE The OS has not enabled LMCE.
197
198 **/
199 BOOLEAN
200 IsLmceOsEnabled (
201 VOID
202 )
203 {
204 MSR_IA32_MCG_CAP_REGISTER McgCap;
205 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
206 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
207
208 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
209 if (McgCap.Bits.MCG_LMCE_P == 0) {
210 return FALSE;
211 }
212
213 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
214 if (FeatureCtrl.Bits.LmceOn == 0) {
215 return FALSE;
216 }
217
218 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
219 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
220 }
221
222 /**
223 Return if Local machine check exception signaled.
224
225 Indicates (when set) that a local machine check exception was generated, i.e. the current machine-check event was
226 delivered to only this logical processor.
227
228 @retval TRUE LMCE was signaled.
229 @retval FALSE LMCE was not signaled.
230
231 **/
232 BOOLEAN
233 IsLmceSignaled (
234 VOID
235 )
236 {
237 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
238
239 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
240 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
241 }
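//
// Editor's note: SmmWaitForApArrival() below combines the two checks above as
// (LmceEn && LmceSignal): when the OS has opted into local machine check
// exceptions and one is pending on this processor, the first round of waiting
// for AP arrival is cut short so the event can be handled promptly.
//
//   if (mMachineCheckSupported) {
//     LmceEn     = IsLmceOsEnabled ();
//     LmceSignal = IsLmceSignaled ();
//   }
//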
242
243 /**
244 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute
245 normal mode code before entering SMM, except for SMI-disabled APs.
246
247 **/
248 VOID
249 SmmWaitForApArrival (
250 VOID
251 )
252 {
253 UINT64 Timer;
254 UINTN Index;
255 BOOLEAN LmceEn;
256 BOOLEAN LmceSignal;
257
258 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
259
260 LmceEn = FALSE;
261 LmceSignal = FALSE;
262 if (mMachineCheckSupported) {
263 LmceEn = IsLmceOsEnabled ();
264 LmceSignal = IsLmceSignaled();
265 }
266
267 //
268 // Platform implementors should choose a timeout value appropriately:
269 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
270 //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
271 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
272 //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
273 //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
274 //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
275 // - The timeout value must be longer than the longest possible IO operation in the system (a configuration sketch follows this comment block).
276 //
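//
// Editor's note: the sync timeout consumed by StartSyncTimer() /
// IsSyncTimerTimeout() comes from PcdCpuSmmApSyncTimeout (in microseconds).
// A hedged platform DSC sketch; the value shown is the UefiCpuPkg default,
// not a recommendation:
//
//   [PcdsFixedAtBuild]
//     gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|1000000
//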
277
278 //
279 // Sync with APs 1st timeout
280 //
281 for (Timer = StartSyncTimer ();
282 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
283 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
284 ) {
285 CpuPause ();
286 }
287
288 //
289 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
290 // because:
291 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
292 //    normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the delayed / blocked state, they
293 //    enter SMM immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
294 //    work while SMI handling is on-going.
295 // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
296 // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in the SMI-disabled state
297 //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
298 //    mode work while SMI handling is on-going.
299 // d) We don't add code to check the SMI disabling status to skip sending IPIs to SMI-disabled APs, because:
300 //    - In the traditional flow, SMI disabling is discouraged.
301 //    - In the relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
302 //    In both cases, adding SMI-disabling checking code increases overhead.
303 //
304 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
305 //
306 // Send SMI IPIs to bring outside processors in
307 //
308 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
309 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
310 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
311 }
312 }
313
314 //
315 // Sync with APs 2nd timeout.
316 //
317 for (Timer = StartSyncTimer ();
318 !IsSyncTimerTimeout (Timer) &&
319 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
320 ) {
321 CpuPause ();
322 }
323 }
324
325 return;
326 }
327
328
329 /**
330 Replace OS MTRR's with SMI MTRR's.
331
332 @param CpuIndex Processor Index
333
334 **/
335 VOID
336 ReplaceOSMtrrs (
337 IN UINTN CpuIndex
338 )
339 {
340 SmmCpuFeaturesDisableSmrr ();
341
342 //
343 // Replace all MTRRs registers
344 //
345 MtrrSetAllMtrrs (&gSmiMtrrs);
346 }
347
348 /**
349 Check whether the task has been finished by all APs.
350
351 @param BlockMode Whether to check in blocking mode or non-blocking mode.
352
353 @retval TRUE Task has been finished by all APs.
354 @retval FALSE Task has not been finished by all APs.
355
356 **/
357 BOOLEAN
358 WaitForAllAPsNotBusy (
359 IN BOOLEAN BlockMode
360 )
361 {
362 UINTN Index;
363
364 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
365 //
366 // Ignore BSP and APs which not call in SMM.
367 //
368 if (!IsPresentAp(Index)) {
369 continue;
370 }
371
372 if (BlockMode) {
373 AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
374 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
375 } else {
376 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
377 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
378 } else {
379 return FALSE;
380 }
381 }
382 }
383
384 return TRUE;
385 }
386
387 /**
388 Check whether it is a present AP.
389
390 @param CpuIndex The AP index which calls this function.
391
392 @retval TRUE It's a present AP.
393 @retval FALSE This is not an AP or it is not present.
394
395 **/
396 BOOLEAN
397 IsPresentAp (
398 IN UINTN CpuIndex
399 )
400 {
401 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
402 *(mSmmMpSyncData->CpuData[CpuIndex].Present));
403 }
404
405 /**
406 Check whether the procedure is being executed on a single AP or on all APs.
407
408 Compare the Tokens used by different APs to know whether the call is a StartAllAps call.
409
410 Whether an AP is valid is based on the AP's Present flag.
411
412 @retval TRUE In a StartAllAps call.
413 @retval FALSE Not in a StartAllAps call.
414
415 **/
416 BOOLEAN
417 InStartAllApsCall (
418 VOID
419 )
420 {
421 UINTN ApIndex;
422 UINTN ApIndex2;
423
424 for (ApIndex = mMaxNumberOfCpus; ApIndex-- > 0;) {
425 if (IsPresentAp (ApIndex) && (mSmmMpSyncData->CpuData[ApIndex].Token != NULL)) {
426 for (ApIndex2 = ApIndex; ApIndex2-- > 0;) {
427 if (IsPresentAp (ApIndex2) && (mSmmMpSyncData->CpuData[ApIndex2].Token != NULL)) {
428 return mSmmMpSyncData->CpuData[ApIndex2].Token == mSmmMpSyncData->CpuData[ApIndex].Token;
429 }
430 }
431 }
432 }
433
434 return FALSE;
435 }
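//
// Editor's note: this test works because InternalSmmStartupAllAPs() stores
// the same token pointer into every present AP's CpuData entry, while
// single-AP dispatch gives each AP its own token. Sketch of the distinction
// (the indices are hypothetical):
//
//   StartAllAps:        CpuData[1].Token == CpuData[2].Token  -> TRUE
//   StartThisAp twice:  CpuData[1].Token != CpuData[2].Token  -> FALSE
//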
436
437 /**
438 Clean up the status flags used during executing the procedure.
439
440 @param CpuIndex The AP index which calls this function.
441
442 **/
443 VOID
444 ReleaseToken (
445 IN UINTN CpuIndex
446 )
447 {
448 UINTN Index;
449 BOOLEAN Released;
450
451 if (InStartAllApsCall ()) {
452 //
453 // In Start All APs mode, make sure all APs have finished task.
454 //
455 if (WaitForAllAPsNotBusy (FALSE)) {
456 //
457 // Clean the flags update in the function call.
458 //
459 Released = FALSE;
460 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
461 //
462 // Only APs that are present in SMM need to be cleaned up.
463 //
464 if (*(mSmmMpSyncData->CpuData[Index].Present) && mSmmMpSyncData->CpuData[Index].Token != NULL) {
465 if (!Released) {
466 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Token);
467 Released = TRUE;
468 }
469 mSmmMpSyncData->CpuData[Index].Token = NULL;
470 }
471 }
472 }
473 } else {
474 //
475 // In single AP mode.
476 //
477 if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
478 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Token);
479 mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
480 }
481 }
482 }
483
484 /**
485 Free the tokens in the maintained list.
486
487 **/
488 VOID
489 FreeTokens (
490 VOID
491 )
492 {
493 LIST_ENTRY *Link;
494 PROCEDURE_TOKEN *ProcToken;
495 TOKEN_BUFFER *TokenBuf;
496
497 //
498 // Only free the token buffers recorded in the OldTokenBufList
499 // upon exiting SMI. Current token buffer stays allocated so
500 // next SMI doesn't need to re-allocate.
501 //
502 gSmmCpuPrivate->UsedTokenNum = 0;
503
504 Link = GetFirstNode (&gSmmCpuPrivate->OldTokenBufList);
505 while (!IsNull (&gSmmCpuPrivate->OldTokenBufList, Link)) {
506 TokenBuf = TOKEN_BUFFER_FROM_LINK (Link);
507
508 Link = RemoveEntryList (&TokenBuf->Link);
509
510 FreePool (TokenBuf->Buffer);
511 FreePool (TokenBuf);
512 }
513
514 while (!IsListEmpty (&gSmmCpuPrivate->TokenList)) {
515 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
516 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
517
518 RemoveEntryList (&ProcToken->Link);
519
520 FreePool (ProcToken);
521 }
522 }
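//
// Editor's note: this is the "Avoid allocate Token every time" scheme named
// in the commit subject above. CreateToken() hands out spin locks from
// CurrentTokenBuf, a chunk of PcdCpuSmmMpTokenCountPerChunk locks; only
// chunks that overflowed into OldTokenBufList are freed on SMI exit, while
// CurrentTokenBuf survives with UsedTokenNum reset to 0, so the next SMI can
// reuse it without calling AllocatePool().
//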
523
524 /**
525 SMI handler for BSP.
526
527 @param CpuIndex BSP processor Index
528 @param SyncMode SMM MP sync mode
529
530 **/
531 VOID
532 BSPHandler (
533 IN UINTN CpuIndex,
534 IN SMM_CPU_SYNC_MODE SyncMode
535 )
536 {
537 UINTN Index;
538 MTRR_SETTINGS Mtrrs;
539 UINTN ApCount;
540 BOOLEAN ClearTopLevelSmiResult;
541 UINTN PresentCount;
542
543 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
544 ApCount = 0;
545
546 //
547 // Flag BSP's presence
548 //
549 *mSmmMpSyncData->InsideSmm = TRUE;
550
551 //
552 // Initialize Debug Agent to start source level debug in BSP handler
553 //
554 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
555
556 //
557 // Mark this processor's presence
558 //
559 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
560
561 //
562 // Clear platform top level SMI status bit before calling SMI handlers. If
563 // we cleared it after SMI handlers are run, we would miss the SMI that
564 // occurs after SMI handlers are done and before SMI status bit is cleared.
565 //
566 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
567 ASSERT (ClearTopLevelSmiResult == TRUE);
568
569 //
570 // Set running processor index
571 //
572 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
573
574 //
575 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
576 //
577 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
578
579 //
580 // Wait for APs to arrive
581 //
582 SmmWaitForApArrival();
583
584 //
585 // Lock the counter down and retrieve the number of APs
586 //
587 *mSmmMpSyncData->AllCpusInSync = TRUE;
588 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
589
590 //
591 // Wait for all APs to get ready for programming MTRRs
592 //
593 WaitForAllAPs (ApCount);
594
595 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
596 //
597 // Signal all APs it's time for backup MTRRs
598 //
599 ReleaseAllAPs ();
600
601 //
602 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
603 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
604 // to a large enough value to avoid this situation.
605 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
606 // We do the backup first and then set MTRR to avoid race condition for threads
607 // in the same core.
608 //
609 MtrrGetAllMtrrs(&Mtrrs);
610
611 //
612 // Wait for all APs to complete their MTRR saving
613 //
614 WaitForAllAPs (ApCount);
615
616 //
617 // Let all processors program SMM MTRRs together
618 //
619 ReleaseAllAPs ();
620
621 //
622 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
623 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
624 // to a large enough value to avoid this situation.
625 //
626 ReplaceOSMtrrs (CpuIndex);
627
628 //
629 // Wait for all APs to complete their MTRR programming
630 //
631 WaitForAllAPs (ApCount);
632 }
633 }
634
635 //
636 // The BUSY lock is initialized to Acquired state
637 //
638 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
639
640 //
641 // Perform the pre tasks
642 //
643 PerformPreTasks ();
644
645 //
646 // Invoke SMM Foundation EntryPoint with the processor information context.
647 //
648 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
649
650 //
651 // Make sure all APs have completed their pending non-blocking tasks
652 //
653 WaitForAllAPsNotBusy (TRUE);
654
655 //
656 // Perform the remaining tasks
657 //
658 PerformRemainingTasks ();
659
660 //
661 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
662 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
663 // will run through freely.
664 //
665 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
666
667 //
668 // Lock the counter down and retrieve the number of APs
669 //
670 *mSmmMpSyncData->AllCpusInSync = TRUE;
671 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
672 //
673 // Make sure all APs have their Present flag set
674 //
675 while (TRUE) {
676 PresentCount = 0;
677 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
678 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
679 PresentCount ++;
680 }
681 }
682 if (PresentCount > ApCount) {
683 break;
684 }
685 }
686 }
687
688 //
689 // Notify all APs to exit
690 //
691 *mSmmMpSyncData->InsideSmm = FALSE;
692 ReleaseAllAPs ();
693
694 //
695 // Wait for all APs to complete their pending tasks
696 //
697 WaitForAllAPs (ApCount);
698
699 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
700 //
701 // Signal APs to restore MTRRs
702 //
703 ReleaseAllAPs ();
704
705 //
706 // Restore OS MTRRs
707 //
708 SmmCpuFeaturesReenableSmrr ();
709 MtrrSetAllMtrrs(&Mtrrs);
710
711 //
712 // Wait for all APs to complete MTRR programming
713 //
714 WaitForAllAPs (ApCount);
715 }
716
717 //
718 // Stop source level debug in BSP handler, the code below will not be
719 // debugged.
720 //
721 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
722
723 //
724 // Signal APs to Reset states/semaphore for this processor
725 //
726 ReleaseAllAPs ();
727
728 //
729 // Perform pending operations for hot-plug
730 //
731 SmmCpuUpdate ();
732
733 //
734 // Clear the Present flag of BSP
735 //
736 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
737
738 //
739 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
740 // WaitForAllAps does not depend on the Present flag.
741 //
742 WaitForAllAPs (ApCount);
743
744 //
745 // Clean the tokens buffer.
746 //
747 FreeTokens ();
748
749 //
750 // Reset BspIndex to -1, meaning BSP has not been elected.
751 //
752 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
753 mSmmMpSyncData->BspIndex = (UINT32)-1;
754 }
755
756 //
757 // Allow APs to check in from this point on
758 //
759 *mSmmMpSyncData->Counter = 0;
760 *mSmmMpSyncData->AllCpusInSync = FALSE;
761 }
762
763 /**
764 SMI handler for AP.
765
766 @param CpuIndex AP processor Index.
767 @param ValidSmi Indicates that current SMI is a valid SMI or not.
768 @param SyncMode SMM MP sync mode.
769
770 **/
771 VOID
772 APHandler (
773 IN UINTN CpuIndex,
774 IN BOOLEAN ValidSmi,
775 IN SMM_CPU_SYNC_MODE SyncMode
776 )
777 {
778 UINT64 Timer;
779 UINTN BspIndex;
780 MTRR_SETTINGS Mtrrs;
781 EFI_STATUS ProcedureStatus;
782
783 //
784 // Timeout BSP
785 //
786 for (Timer = StartSyncTimer ();
787 !IsSyncTimerTimeout (Timer) &&
788 !(*mSmmMpSyncData->InsideSmm);
789 ) {
790 CpuPause ();
791 }
792
793 if (!(*mSmmMpSyncData->InsideSmm)) {
794 //
795 // BSP timeout in the first round
796 //
797 if (mSmmMpSyncData->BspIndex != -1) {
798 //
799 // BSP Index is known
800 //
801 BspIndex = mSmmMpSyncData->BspIndex;
802 ASSERT (CpuIndex != BspIndex);
803
804 //
805 // Send SMI IPI to bring BSP in
806 //
807 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
808
809 //
810 // Now wait for the BSP a 2nd time
811 //
812 for (Timer = StartSyncTimer ();
813 !IsSyncTimerTimeout (Timer) &&
814 !(*mSmmMpSyncData->InsideSmm);
815 ) {
816 CpuPause ();
817 }
818
819 if (!(*mSmmMpSyncData->InsideSmm)) {
820 //
821 // Give up since BSP is unable to enter SMM
822 // and signal the completion of this AP
823 WaitForSemaphore (mSmmMpSyncData->Counter);
824 return;
825 }
826 } else {
827 //
828 // Don't know BSP index. Give up without sending IPI to BSP.
829 //
830 WaitForSemaphore (mSmmMpSyncData->Counter);
831 return;
832 }
833 }
834
835 //
836 // BSP is available
837 //
838 BspIndex = mSmmMpSyncData->BspIndex;
839 ASSERT (CpuIndex != BspIndex);
840
841 //
842 // Mark this processor's presence
843 //
844 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
845
846 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
847 //
848 // Notify BSP of arrival at this point
849 //
850 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
851 }
852
853 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
854 //
855 // Wait for the signal from BSP to backup MTRRs
856 //
857 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
858
859 //
860 // Backup OS MTRRs
861 //
862 MtrrGetAllMtrrs(&Mtrrs);
863
864 //
865 // Signal BSP the completion of this AP
866 //
867 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
868
869 //
870 // Wait for BSP's signal to program MTRRs
871 //
872 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
873
874 //
875 // Replace OS MTRRs with SMI MTRRs
876 //
877 ReplaceOSMtrrs (CpuIndex);
878
879 //
880 // Signal BSP the completion of this AP
881 //
882 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
883 }
884
885 while (TRUE) {
886 //
887 // Wait for something to happen
888 //
889 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
890
891 //
892 // Check if BSP wants to exit SMM
893 //
894 if (!(*mSmmMpSyncData->InsideSmm)) {
895 break;
896 }
897
898 //
899 // BUSY should be acquired by SmmStartupThisAp()
900 //
901 ASSERT (
902 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
903 );
904
905 //
906 // Invoke the scheduled procedure
907 //
908 ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
909 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
910 );
911 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
912 *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
913 }
914
915 //
916 // Release BUSY
917 //
918 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
919
920 ReleaseToken (CpuIndex);
921 }
922
923 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
924 //
925 // Notify BSP the readiness of this AP to program MTRRs
926 //
927 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
928
929 //
930 // Wait for the signal from BSP to program MTRRs
931 //
932 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
933
934 //
935 // Restore OS MTRRs
936 //
937 SmmCpuFeaturesReenableSmrr ();
938 MtrrSetAllMtrrs(&Mtrrs);
939 }
940
941 //
942 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
943 //
944 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
945
946 //
947 // Wait for the signal from BSP to Reset states/semaphore for this processor
948 //
949 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
950
951 //
952 // Reset states/semaphore for this processor
953 //
954 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
955
956 //
957 // Notify BSP the readiness of this AP to exit SMM
958 //
959 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
960
961 }
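//
// Editor's note: a condensed sketch of the handshake implemented by
// BSPHandler()/APHandler() above; the MTRR save/restore steps occur only
// when SmmCpuFeaturesNeedConfigureMtrrs() returns TRUE.
//
//   BSP                                    AP
//   wait for AP arrival                    mark Present, signal BSP's Run
//   lock Counter, run SMI handlers         wait on own Run, run procedures
//   InsideSmm = FALSE, ReleaseAllAPs ()    see InsideSmm FALSE, leave loop
//   WaitForAllAPs (ApCount)                clear Present, signal BSP's Run
//   Counter = 0, AllCpusInSync = FALSE     spin until AllCpusInSync clears
//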
962
963 /**
964 Create a 4GB page table in SMRAM.
965
966 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
967 @return PageTable Address
968
969 **/
970 UINT32
971 Gen4GPageTable (
972 IN BOOLEAN Is32BitPageTable
973 )
974 {
975 VOID *PageTable;
976 UINTN Index;
977 UINT64 *Pte;
978 UINTN PagesNeeded;
979 UINTN Low2MBoundary;
980 UINTN High2MBoundary;
981 UINTN Pages;
982 UINTN GuardPage;
983 UINT64 *Pdpte;
984 UINTN PageIndex;
985 UINTN PageAddress;
986
987 Low2MBoundary = 0;
988 High2MBoundary = 0;
989 PagesNeeded = 0;
990 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
991 //
992 // Add one more page for known good stack, then find the lower 2MB aligned address.
993 //
994 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
995 //
996 // Add two more pages for known good stack and stack guard page,
997 // then find the lower 2MB aligned address.
998 //
999 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
1000 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
1001 }
1002 //
1003 // Allocate the page table
1004 //
1005 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
1006 ASSERT (PageTable != NULL);
1007
1008 PageTable = (VOID *)((UINTN)PageTable);
1009 Pte = (UINT64*)PageTable;
1010
1011 //
1012 // Zero out all page table entries first
1013 //
1014 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
1015
1016 //
1017 // Set Page Directory Pointers
1018 //
1019 for (Index = 0; Index < 4; Index++) {
1020 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
1021 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
1022 }
1023 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
1024
1025 //
1026 // Fill in Page Directory Entries
1027 //
1028 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
1029 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
1030 }
1031
1032 Pdpte = (UINT64*)PageTable;
1033 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1034 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
1035 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
1036 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
1037 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1038 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
1039 //
1040 // Fill in Page Table Entries
1041 //
1042 Pte = (UINT64*)Pages;
1043 PageAddress = PageIndex;
1044 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
1045 if (PageAddress == GuardPage) {
1046 //
1047 // Mark the guard page as non-present
1048 //
1049 Pte[Index] = PageAddress | mAddressEncMask;
1050 GuardPage += mSmmStackSize;
1051 if (GuardPage > mSmmStackArrayEnd) {
1052 GuardPage = 0;
1053 }
1054 } else {
1055 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
1056 }
1057 PageAddress+= EFI_PAGE_SIZE;
1058 }
1059 Pages += EFI_PAGE_SIZE;
1060 }
1061 }
1062
1063 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
1064 Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1065 if ((Pte[0] & IA32_PG_PS) == 0) {
1066 // 4K-page entries are already mapped. Just hide the first one anyway.
1067 Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1068 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
1069 } else {
1070 // Create 4K-page entries
1071 Pages = (UINTN)AllocatePageTableMemory (1);
1072 ASSERT (Pages != 0);
1073
1074 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
1075
1076 Pte = (UINT64*)Pages;
1077 PageAddress = 0;
1078 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left
1079 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
1080 PageAddress += EFI_PAGE_SIZE;
1081 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
1082 }
1083 }
1084 }
1085
1086 return (UINT32)(UINTN)PageTable;
1087 }
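//
// Editor's note: a sketch of the layout built above. Page 0 holds the PDPT
// (4 entries); pages 1-4 are page directories whose 512 entries each map
// 2MB pages, covering 4GB in total; the optional PagesNeeded pages hold 4KB
// page tables used only to punch out the stack guard pages.
//
//   PageTable + 0x0000: PDPT   (4 PDPTEs -> pages 1..4)
//   PageTable + 0x1000: PD #0  (2MB entries for 0 .. 1GB-1)
//   PageTable + 0x2000: PD #1  (1GB .. 2GB-1), and so on through PD #3
//   PageTable + 0x5000: 4KB page tables (only with PcdCpuSmmStackGuard)
//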
1088
1089 /**
1090 Checks whether the input token is the token currently in use.
1091
1092 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1093 BroadcastProcedure.
1094
1095 @retval TRUE The input token is the token currently in use.
1096 @retval FALSE The input token is not the token currently in use.
1097 **/
1098 BOOLEAN
1099 IsTokenInUse (
1100 IN SPIN_LOCK *Token
1101 )
1102 {
1103 LIST_ENTRY *Link;
1104 PROCEDURE_TOKEN *ProcToken;
1105
1106 if (Token == NULL) {
1107 return FALSE;
1108 }
1109
1110 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1111 while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
1112 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1113
1114 if (ProcToken->ProcedureToken == Token) {
1115 return TRUE;
1116 }
1117
1118 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1119 }
1120
1121 return FALSE;
1122 }
1123
1124 /**
1125 Create a token and save it to the maintained list.
1126
1127 @return The spin lock used as the token.
1128
1129 **/
1130 SPIN_LOCK *
1131 CreateToken (
1132 VOID
1133 )
1134 {
1135 PROCEDURE_TOKEN *ProcToken;
1136 SPIN_LOCK *CpuToken;
1137 UINTN SpinLockSize;
1138 TOKEN_BUFFER *TokenBuf;
1139 UINT32 TokenCountPerChunk;
1140
1141 SpinLockSize = GetSpinLockProperties ();
1142 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1143
1144 if (gSmmCpuPrivate->UsedTokenNum == TokenCountPerChunk) {
1145 DEBUG ((DEBUG_VERBOSE, "CpuSmm: No free token buffer, allocate new buffer!\n"));
1146
1147 //
1148 // Record the current token buffer so it can be freed later.
1149 // The token buffer currently in use is not kept in this list.
1150 //
1151 TokenBuf = AllocatePool (sizeof (TOKEN_BUFFER));
1152 ASSERT (TokenBuf != NULL);
1153 TokenBuf->Signature = TOKEN_BUFFER_SIGNATURE;
1154 TokenBuf->Buffer = gSmmCpuPrivate->CurrentTokenBuf;
1155
1156 InsertTailList (&gSmmCpuPrivate->OldTokenBufList, &TokenBuf->Link);
1157
1158 gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
1159 ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);
1160 gSmmCpuPrivate->UsedTokenNum = 0;
1161 }
1162
1163 CpuToken = (SPIN_LOCK *)(gSmmCpuPrivate->CurrentTokenBuf + SpinLockSize * gSmmCpuPrivate->UsedTokenNum);
1164 gSmmCpuPrivate->UsedTokenNum++;
1165
1166 InitializeSpinLock (CpuToken);
1167 AcquireSpinLock (CpuToken);
1168
1169 ProcToken = AllocatePool (sizeof (PROCEDURE_TOKEN));
1170 ASSERT (ProcToken != NULL);
1171 ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
1172 ProcToken->ProcedureToken = CpuToken;
1173
1174 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);
1175
1176 return CpuToken;
1177 }
1178
1179 /**
1180 Checks status of specified AP.
1181
1182 This function checks whether the specified AP has finished the task assigned
1183 by StartupThisAP(), and whether timeout expires.
1184
1185 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1186 BroadcastProcedure.
1187
1188 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAP().
1189 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1190 **/
1191 EFI_STATUS
1192 IsApReady (
1193 IN SPIN_LOCK *Token
1194 )
1195 {
1196 if (AcquireSpinLockOrFail (Token)) {
1197 ReleaseSpinLock (Token);
1198 return EFI_SUCCESS;
1199 }
1200
1201 return EFI_NOT_READY;
1202 }
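//
// Editor's note: a hedged usage sketch of non-blocking dispatch. The token
// is a spin lock held from CreateToken() until ReleaseToken() runs after the
// AP finishes, so IsApReady() is just a try-acquire probe. Proc, Arg and
// Status are hypothetical caller-supplied names.
//
//   MM_COMPLETION Token;
//   InternalSmmStartupThisAp (Proc, CpuIndex, Arg, &Token, 0, &Status);
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();                 // the AP is still running Proc
//   }
//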
1203
1204 /**
1205 Schedule a procedure to run on the specified CPU.
1206
1207 @param[in] Procedure The address of the procedure to run
1208 @param[in] CpuIndex Target CPU Index
1209 @param[in,out] ProcArguments The parameter to pass to the procedure
1210 @param[in] Token This is an optional parameter that allows the caller to execute the
1211 procedure in a blocking or non-blocking fashion. If it is NULL the
1212 call is blocking, and the call will not return until the AP has
1213 completed the procedure. If the token is not NULL, the call will
1214 return immediately. The caller can check whether the procedure has
1215 completed with CheckOnProcedure or WaitForProcedure.
1216 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1217 execution of Procedure, either for blocking or non-blocking mode.
1218 Zero means infinity. If the timeout expires before all APs return
1219 from Procedure, then Procedure on the failed APs is terminated. If
1220 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1221 If the timeout expires in non-blocking mode, the timeout
1222 can be determined through CheckOnProcedure or WaitForProcedure.
1223 Note that timeout support is optional. Whether an implementation
1224 supports this feature can be determined via the Attributes data
1225 member.
1226 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1227 by Procedure when it completes execution on the target AP, or with
1228 EFI_TIMEOUT if the Procedure fails to complete within the optional
1229 timeout. The implementation will update this variable with
1230 EFI_NOT_READY prior to starting Procedure on the target AP.
1231
1232 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1233 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1234 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1235 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1236 @retval EFI_SUCCESS The procedure has been successfully scheduled
1237
1238 **/
1239 EFI_STATUS
1240 InternalSmmStartupThisAp (
1241 IN EFI_AP_PROCEDURE2 Procedure,
1242 IN UINTN CpuIndex,
1243 IN OUT VOID *ProcArguments OPTIONAL,
1244 IN MM_COMPLETION *Token,
1245 IN UINTN TimeoutInMicroseconds,
1246 IN OUT EFI_STATUS *CpuStatus
1247 )
1248 {
1249 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
1250 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
1251 return EFI_INVALID_PARAMETER;
1252 }
1253 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1254 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
1255 return EFI_INVALID_PARAMETER;
1256 }
1257 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
1258 return EFI_INVALID_PARAMETER;
1259 }
1260 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
1261 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
1262 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
1263 }
1264 return EFI_INVALID_PARAMETER;
1265 }
1266 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
1267 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
1268 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
1269 }
1270 return EFI_INVALID_PARAMETER;
1271 }
1272 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1273 return EFI_INVALID_PARAMETER;
1274 }
1275 if (Procedure == NULL) {
1276 return EFI_INVALID_PARAMETER;
1277 }
1278
1279 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1280
1281 if (Token != NULL) {
1282 *Token = (MM_COMPLETION) CreateToken ();
1283 }
1284
1285 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
1286 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
1287 if (Token != NULL) {
1288 mSmmMpSyncData->CpuData[CpuIndex].Token = (SPIN_LOCK *)(*Token);
1289 }
1290 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
1291 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
1292 *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
1293 }
1294
1295 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
1296
1297 if (Token == NULL) {
1298 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1299 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1300 }
1301
1302 return EFI_SUCCESS;
1303 }
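//
// Editor's note: the Busy lock doubles as the AP's "procedure running"
// indicator. The routine above acquires Busy, posts Procedure/Parameter and
// bumps the AP's Run semaphore; APHandler() executes the procedure and then
// releases Busy. That is why the blocking path (Token == NULL) simply
// re-acquires and releases Busy to join on the AP's completion.
//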
1304
1305 /**
1306 Worker function to execute a caller provided function on all enabled APs.
1307
1308 @param[in] Procedure A pointer to the function to be run on
1309 enabled APs of the system.
1310 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1311 APs to return from Procedure, either for
1312 blocking or non-blocking mode.
1313 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1314 all APs.
1315 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1316 procedure in a blocking or non-blocking fashion. If it is NULL the
1317 call is blocking, and the call will not return until the AP has
1318 completed the procedure. If the token is not NULL, the call will
1319 return immediately. The caller can check whether the procedure has
1320 completed with CheckOnProcedure or WaitForProcedure.
1321 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1322 by Procedure when it completes execution on the target AP, or with
1323 EFI_TIMEOUT if the Procedure fails to complete within the optional
1324 timeout. The implementation will update this variable with
1325 EFI_NOT_READY prior to starting Procedure on the target AP.
1326
1327
1328 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1329 the timeout expired.
1330 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1331 to all enabled APs.
1332 @retval others Failed to Startup all APs.
1333
1334 **/
1335 EFI_STATUS
1336 InternalSmmStartupAllAPs (
1337 IN EFI_AP_PROCEDURE2 Procedure,
1338 IN UINTN TimeoutInMicroseconds,
1339 IN OUT VOID *ProcedureArguments OPTIONAL,
1340 IN OUT MM_COMPLETION *Token,
1341 IN OUT EFI_STATUS *CPUStatus
1342 )
1343 {
1344 UINTN Index;
1345 UINTN CpuCount;
1346
1347 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1348 return EFI_INVALID_PARAMETER;
1349 }
1350 if (Procedure == NULL) {
1351 return EFI_INVALID_PARAMETER;
1352 }
1353
1354 CpuCount = 0;
1355 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
1356 if (IsPresentAp (Index)) {
1357 CpuCount ++;
1358
1359 if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
1360 return EFI_INVALID_PARAMETER;
1361 }
1362
1363 if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
1364 return EFI_NOT_READY;
1365 }
1366 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1367 }
1368 }
1369 if (CpuCount == 0) {
1370 return EFI_NOT_STARTED;
1371 }
1372
1373 if (Token != NULL) {
1374 *Token = (MM_COMPLETION) CreateToken ();
1375 }
1376
1377 //
1378 // Make sure all BUSY locks are acquired.
1379 //
1380 // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each AP,
1381 // the code here always uses AcquireSpinLock instead of AcquireSpinLockOrFail, even in
1382 // non-blocking mode.
1383 //
1384 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
1385 if (IsPresentAp (Index)) {
1386 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1387 }
1388 }
1389
1390 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
1391 if (IsPresentAp (Index)) {
1392 mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
1393 mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
1394 if (Token != NULL) {
1395 mSmmMpSyncData->CpuData[Index].Token = (SPIN_LOCK *)(*Token);
1396 }
1397 if (CPUStatus != NULL) {
1398 mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
1399 if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
1400 *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
1401 }
1402 }
1403 } else {
1404 //
1405 // PI spec requirement:
1406 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
1407 //
1408 if (CPUStatus != NULL) {
1409 CPUStatus[Index] = EFI_NOT_STARTED;
1410 }
1411 }
1412 }
1413
1414 ReleaseAllAPs ();
1415
1416 if (Token == NULL) {
1417 //
1418 // Make sure all APs have completed their tasks.
1419 //
1420 WaitForAllAPsNotBusy (TRUE);
1421 }
1422
1423 return EFI_SUCCESS;
1424 }
1425
1426 /**
1427 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1428 If the function is defined with a type that is not compatible with
1429 the type (of the expression) pointed to by the expression that
1430 denotes the called function, the behavior is undefined.
1431
1432 So add the wrapper function below to convert between EFI_AP_PROCEDURE
1433 and EFI_AP_PROCEDURE2.
1434
1435 Wrapper for Procedures.
1436
1437 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1438
1439 **/
1440 EFI_STATUS
1441 EFIAPI
1442 ProcedureWrapper (
1443 IN VOID *Buffer
1444 )
1445 {
1446 PROCEDURE_WRAPPER *Wrapper;
1447
1448 Wrapper = Buffer;
1449 Wrapper->Procedure (Wrapper->ProcedureArgument);
1450
1451 return EFI_SUCCESS;
1452 }
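//
// Editor's note: the two procedure types differ in return type, which is
// exactly the incompatibility the wrapper hides. Prototypes paraphrased from
// the PI definitions:
//
//   typedef VOID       (EFIAPI *EFI_AP_PROCEDURE)  (IN OUT VOID *Buffer);
//   typedef EFI_STATUS (EFIAPI *EFI_AP_PROCEDURE2) (IN VOID *Buffer);
//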
1453
1454 /**
1455 Schedule a procedure to run on the specified CPU in blocking mode.
1456
1457 @param[in] Procedure The address of the procedure to run
1458 @param[in] CpuIndex Target CPU Index
1459 @param[in, out] ProcArguments The parameter to pass to the procedure
1460
1461 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1462 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1463 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1464 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1465 @retval EFI_SUCCESS The procedure has been successfully scheduled
1466
1467 **/
1468 EFI_STATUS
1469 EFIAPI
1470 SmmBlockingStartupThisAp (
1471 IN EFI_AP_PROCEDURE Procedure,
1472 IN UINTN CpuIndex,
1473 IN OUT VOID *ProcArguments OPTIONAL
1474 )
1475 {
1476 PROCEDURE_WRAPPER Wrapper;
1477
1478 Wrapper.Procedure = Procedure;
1479 Wrapper.ProcedureArgument = ProcArguments;
1480
1481 //
1482 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1483 //
1484 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1485 }
1486
1487 /**
1488 Schedule a procedure to run on the specified CPU.
1489
1490 @param Procedure The address of the procedure to run
1491 @param CpuIndex Target CPU Index
1492 @param ProcArguments The parameter to pass to the procedure
1493
1494 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1495 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1496 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1497 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1498 @retval EFI_SUCCESS The procedure has been successfully scheduled
1499
1500 **/
1501 EFI_STATUS
1502 EFIAPI
1503 SmmStartupThisAp (
1504 IN EFI_AP_PROCEDURE Procedure,
1505 IN UINTN CpuIndex,
1506 IN OUT VOID *ProcArguments OPTIONAL
1507 )
1508 {
1509 MM_COMPLETION Token;
1510
1511 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1512 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1513
1514 //
1515 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1516 //
1517 return InternalSmmStartupThisAp (
1518 ProcedureWrapper,
1519 CpuIndex,
1520 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1521 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
1522 0,
1523 NULL
1524 );
1525 }
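//
// Editor's note: a hedged caller-side sketch; this function backs the SMM
// System Table's SmmStartupThisAp service, so an SMI handler typically runs
// code on another core like this (MyApWork and MyContext are hypothetical):
//
//   Status = gSmst->SmmStartupThisAp (MyApWork, CpuIndex, &MyContext);
//
// PcdCpuSmmBlockStartupThisAp selects between a fully blocking call (NULL
// token) and a fire-and-forget call that relies on a temporary token.
//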
1526
1527 /**
1528 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1529 This is useful when you want hardware breakpoints set outside of SMM to remain effective inside SMM.
1530
1531 NOTE: It might not be appropriate at runtime since it might
1532 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1533
1534 @param CpuIndex CPU Index
1535
1536 **/
1537 VOID
1538 EFIAPI
1539 CpuSmmDebugEntry (
1540 IN UINTN CpuIndex
1541 )
1542 {
1543 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1544
1545 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1546 ASSERT(CpuIndex < mMaxNumberOfCpus);
1547 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1548 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1549 AsmWriteDr6 (CpuSaveState->x86._DR6);
1550 AsmWriteDr7 (CpuSaveState->x86._DR7);
1551 } else {
1552 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1553 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1554 }
1555 }
1556 }
1557
1558 /**
1559 This function restores DR6 & DR7 to SMM save state.
1560
1561 NOTE: It might not be appropriate at runtime since it might
1562 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1563
1564 @param CpuIndex CPU Index
1565
1566 **/
1567 VOID
1568 EFIAPI
1569 CpuSmmDebugExit (
1570 IN UINTN CpuIndex
1571 )
1572 {
1573 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1574
1575 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1576 ASSERT(CpuIndex < mMaxNumberOfCpus);
1577 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1578 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1579 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1580 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1581 } else {
1582 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1583 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1584 }
1585 }
1586 }
1587
1588 /**
1589 C function for SMI entry, each processor comes here upon SMI trigger.
1590
1591 @param CpuIndex CPU Index
1592
1593 **/
1594 VOID
1595 EFIAPI
1596 SmiRendezvous (
1597 IN UINTN CpuIndex
1598 )
1599 {
1600 EFI_STATUS Status;
1601 BOOLEAN ValidSmi;
1602 BOOLEAN IsBsp;
1603 BOOLEAN BspInProgress;
1604 UINTN Index;
1605 UINTN Cr2;
1606
1607 ASSERT(CpuIndex < mMaxNumberOfCpus);
1608
1609 //
1610 // Save Cr2 because Page Fault exception in SMM may override its value,
1611 // when using on-demand paging for memory above 4GB.
1612 //
1613 Cr2 = 0;
1614 SaveCr2 (&Cr2);
1615
1616 //
1617 // Call the user-registered Startup function first.
1618 //
1619 if (mSmmMpSyncData->StartupProcedure != NULL) {
1620 mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
1621 }
1622
1623 //
1624 // Perform CPU specific entry hooks
1625 //
1626 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1627
1628 //
1629 // Determine if this is a valid SMI
1630 //
1631 ValidSmi = PlatformValidSmi();
1632
1633 //
1634 // Determine if the BSP is already in progress. Note this must be checked after
1635 // ValidSmi because BSP may clear a valid SMI source after checking in.
1636 //
1637 BspInProgress = *mSmmMpSyncData->InsideSmm;
1638
1639 if (!BspInProgress && !ValidSmi) {
1640 //
1641 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1642 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1643 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1644 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1645 // is nothing we need to do.
1646 //
1647 goto Exit;
1648 } else {
1649 //
1650 // Signal presence of this processor
1651 //
1652 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1653 //
1654 // BSP has already ended the synchronization, so QUIT!!!
1655 //
1656
1657 //
1658 // Wait for BSP's signal to finish SMI
1659 //
1660 while (*mSmmMpSyncData->AllCpusInSync) {
1661 CpuPause ();
1662 }
1663 goto Exit;
1664 } else {
1665
1666 //
1667 // The BUSY lock is initialized to Released state.
1668 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1669 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1670 // after AP's present flag is detected.
1671 //
1672 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1673 }
1674
1675 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1676 ActivateSmmProfile (CpuIndex);
1677 }
1678
1679 if (BspInProgress) {
1680 //
1681 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1682 // as BSP may have cleared the SMI status
1683 //
1684 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1685 } else {
1686 //
1687 // We have a valid SMI
1688 //
1689
1690 //
1691 // Elect BSP
1692 //
1693 IsBsp = FALSE;
1694 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1695 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1696 //
1697 // Call platform hook to do BSP election
1698 //
1699 Status = PlatformSmmBspElection (&IsBsp);
1700 if (EFI_SUCCESS == Status) {
1701 //
1702 // Platform hook determines successfully
1703 //
1704 if (IsBsp) {
1705 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1706 }
1707 } else {
1708 //
1709 // Platform hook fails to determine, use default BSP election method
1710 //
1711 InterlockedCompareExchange32 (
1712 (UINT32*)&mSmmMpSyncData->BspIndex,
1713 (UINT32)-1,
1714 (UINT32)CpuIndex
1715 );
1716 }
1717 }
1718 }
1719
1720 //
1721 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1722 //
1723 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1724
1725 //
1726 // Clear last request for SwitchBsp.
1727 //
1728 if (mSmmMpSyncData->SwitchBsp) {
1729 mSmmMpSyncData->SwitchBsp = FALSE;
1730 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1731 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1732 }
1733 }
1734
1735 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1736 SmmProfileRecordSmiNum ();
1737 }
1738
1739 //
1740 // BSP Handler is always called with a ValidSmi == TRUE
1741 //
1742 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1743 } else {
1744 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1745 }
1746 }
1747
1748 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1749
1750 //
1751 // Wait for BSP's signal to exit SMI
1752 //
1753 while (*mSmmMpSyncData->AllCpusInSync) {
1754 CpuPause ();
1755 }
1756 }
1757
1758 Exit:
1759 SmmCpuFeaturesRendezvousExit (CpuIndex);
1760
1761 //
1762 // Restore Cr2
1763 //
1764 RestoreCr2 (Cr2);
1765 }
1766
1767 /**
1768 Allocate buffer for SpinLock and Wrapper function buffer.
1769
1770 **/
1771 VOID
1772 InitializeDataForMmMp (
1773 VOID
1774 )
1775 {
1776 UINTN SpinLockSize;
1777 UINT32 TokenCountPerChunk;
1778
1779 SpinLockSize = GetSpinLockProperties ();
1780 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1781 ASSERT (TokenCountPerChunk != 0);
1782 if (TokenCountPerChunk == 0) {
1783 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1784 CpuDeadLoop ();
1785 }
1786 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
1787
1788 gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
1789 ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);
1790
1791 gSmmCpuPrivate->UsedTokenNum = 0;
1792
1793 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1794 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1795
1796 InitializeListHead (&gSmmCpuPrivate->TokenList);
1797 InitializeListHead (&gSmmCpuPrivate->OldTokenBufList);
1798 }
1799
1800 /**
1801 Allocate buffer for all semaphores and spin locks.
1802
1803 **/
1804 VOID
1805 InitializeSmmCpuSemaphores (
1806 VOID
1807 )
1808 {
1809 UINTN ProcessorCount;
1810 UINTN TotalSize;
1811 UINTN GlobalSemaphoresSize;
1812 UINTN CpuSemaphoresSize;
1813 UINTN SemaphoreSize;
1814 UINTN Pages;
1815 UINTN *SemaphoreBlock;
1816 UINTN SemaphoreAddr;
1817
1818 SemaphoreSize = GetSpinLockProperties ();
1819 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1820 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1821 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1822 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1823 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1824 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1825 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1826 SemaphoreBlock = AllocatePages (Pages);
1827 ASSERT (SemaphoreBlock != NULL);
1828 ZeroMem (SemaphoreBlock, TotalSize);
1829
1830 SemaphoreAddr = (UINTN)SemaphoreBlock;
1831 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1832 SemaphoreAddr += SemaphoreSize;
1833 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1834 SemaphoreAddr += SemaphoreSize;
1835 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1836 SemaphoreAddr += SemaphoreSize;
1837 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1838 SemaphoreAddr += SemaphoreSize;
1839 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1840 = (SPIN_LOCK *)SemaphoreAddr;
1841 SemaphoreAddr += SemaphoreSize;
1842
1843 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1844 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1845 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1846 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1847 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1848 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1849
1850 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1851 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1852
1853 mSemaphoreSize = SemaphoreSize;
1854 }
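//
// Editor's note: each semaphore/lock is padded out to SemaphoreSize bytes
// (GetSpinLockProperties(), typically one cache line) so CPUs spinning on
// different flags do not false-share a line. Resulting block layout:
//
//   globals: [Counter][InsideSmm][AllCpusInSync][PFLock][CodeAccessCheckLock]
//   per-CPU: [Busy x N][Run x N][Present x N]
//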
1855
1856 /**
1857 Initialize un-cacheable data.
1858
1859 **/
1860 VOID
1861 EFIAPI
1862 InitializeMpSyncData (
1863 VOID
1864 )
1865 {
1866 UINTN CpuIndex;
1867
1868 if (mSmmMpSyncData != NULL) {
1869 //
1870 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1871 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1872 //
1873 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1874 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1875 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1876 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1877 //
1878 // Enable BSP election by setting BspIndex to -1
1879 //
1880 mSmmMpSyncData->BspIndex = (UINT32)-1;
1881 }
1882 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1883
1884 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1885 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1886 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1887 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1888 mSmmMpSyncData->AllCpusInSync != NULL);
1889 *mSmmMpSyncData->Counter = 0;
1890 *mSmmMpSyncData->InsideSmm = FALSE;
1891 *mSmmMpSyncData->AllCpusInSync = FALSE;
1892
1893 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1894 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1895 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1896 mSmmMpSyncData->CpuData[CpuIndex].Run =
1897 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1898 mSmmMpSyncData->CpuData[CpuIndex].Present =
1899 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1900 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1901 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1902 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1903 }
1904 }
1905 }
1906
1907 /**
1908 Initialize global data for MP synchronization.
1909
1910 @param Stacks Base address of SMI stack buffer for all processors.
1911 @param StackSize Stack size for each processor in SMM.
1912 @param ShadowStackSize Shadow Stack size for each processor in SMM.
1913
1914 **/
1915 UINT32
1916 InitializeMpServiceData (
1917 IN VOID *Stacks,
1918 IN UINTN StackSize,
1919 IN UINTN ShadowStackSize
1920 )
1921 {
1922 UINT32 Cr3;
1923 UINTN Index;
1924 UINT8 *GdtTssTables;
1925 UINTN GdtTableStepSize;
1926 CPUID_VERSION_INFO_EDX RegEdx;
1927
1928 //
1929 // Determine if this CPU supports machine check
1930 //
1931 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1932 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1933
1934 //
1935 // Allocate memory for all locks and semaphores
1936 //
1937 InitializeSmmCpuSemaphores ();
1938
1939 //
1940 // Initialize mSmmMpSyncData
1941 //
1942 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1943 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1944 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1945 ASSERT (mSmmMpSyncData != NULL);
1946 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1947 InitializeMpSyncData ();
1948
1949 //
1950 // Initialize physical address mask
1951 // NOTE: Physical memory above virtual address limit is not supported !!!
1952 //
1953 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1954 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1955 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1956
1957 //
1958 // Create page tables
1959 //
1960 Cr3 = SmmInitPageTable ();
1961
1962 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1963
1964 //
1965 // Install SMI handler for each CPU
1966 //
1967 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1968 InstallSmiHandler (
1969 Index,
1970 (UINT32)mCpuHotPlugData.SmBase[Index],
1971 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
1972 StackSize,
1973 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1974 gcSmiGdtr.Limit + 1,
1975 gcSmiIdtr.Base,
1976 gcSmiIdtr.Limit + 1,
1977 Cr3
1978 );
1979 }
1980
1981 //
1982 // Record current MTRR settings
1983 //
1984 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1985 MtrrGetAllMtrrs (&gSmiMtrrs);
1986
1987 return Cr3;
1988 }
1989
1990 /**
1991
1992 Register the SMM Foundation entry point.
1993
1994 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1995 @param SmmEntryPoint SMM Foundation EntryPoint
1996
1997 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
1998
1999 **/
2000 EFI_STATUS
2001 EFIAPI
2002 RegisterSmmEntry (
2003 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
2004 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2005 )
2006 {
2007 //
2008 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2009 //
2010 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
2011 return EFI_SUCCESS;
2012 }
2013
2014 /**
2015
2016 Register a startup procedure that will run on each processor upon SMI entry.
2017
2018 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2019 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2020 with the related definitions of
2021 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2022 The caller may pass a value of NULL to deregister any existing
2023 startup procedure.
2024 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2025 run by the AP. It is an optional common mailbox between APs and
2026 the caller to share information
2027
2028 @retval EFI_SUCCESS The Procedure has been set successfully.
2029 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2030
2031 **/
2032 EFI_STATUS
2033 RegisterStartupProcedure (
2034 IN EFI_AP_PROCEDURE Procedure,
2035 IN OUT VOID *ProcedureArguments OPTIONAL
2036 )
2037 {
2038 if (Procedure == NULL && ProcedureArguments != NULL) {
2039 return EFI_INVALID_PARAMETER;
2040 }
2041 if (mSmmMpSyncData == NULL) {
2042 return EFI_NOT_READY;
2043 }
2044
2045 mSmmMpSyncData->StartupProcedure = Procedure;
2046 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2047
2048 return EFI_SUCCESS;
2049 }