1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25
26 /**
27 Performs an atomic compare exchange operation to get the semaphore.
28 The compare exchange operation must be performed using
29 MP safe mechanisms.
30
31 @param Sem IN: 32-bit unsigned integer
32 OUT: original integer - 1
33 @return Original integer - 1
34
35 **/
36 UINT32
37 WaitForSemaphore (
38 IN OUT volatile UINT32 *Sem
39 )
40 {
41 UINT32 Value;
42
43 do {
44 Value = *Sem;
45 } while (Value == 0 ||
46 InterlockedCompareExchange32 (
47 (UINT32*)Sem,
48 Value,
49 Value - 1
50 ) != Value);
51 return Value - 1;
52 }
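//
// Illustrative note (not part of the original module): WaitForSemaphore()
// implements a lock-free "decrement if non-zero" acquire, and is paired with
// ReleaseSemaphore() below. A minimal sketch with a hypothetical counter:
//
//   volatile UINT32 Ticket = 0;
//   ReleaseSemaphore (&Ticket);    // producer: Ticket goes 0 -> 1, returns 1
//   WaitForSemaphore (&Ticket);    // consumer: spins while Ticket == 0, then 1 -> 0
//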
53
54
55 /**
56 Performs an atomic compare exchange operation to release the semaphore.
57 The compare exchange operation must be performed using
58 MP safe mechanisms.
59
60 @param Sem IN: 32-bit unsigned integer
61 OUT: original integer + 1
62 @return Original integer + 1
63
64 **/
65 UINT32
66 ReleaseSemaphore (
67 IN OUT volatile UINT32 *Sem
68 )
69 {
70 UINT32 Value;
71
72 do {
73 Value = *Sem;
74 } while (Value + 1 != 0 &&
75 InterlockedCompareExchange32 (
76 (UINT32*)Sem,
77 Value,
78 Value + 1
79 ) != Value);
80 return Value + 1;
81 }
82
83 /**
84 Performs an atomic compare exchange operation to lock the semaphore.
85 The compare exchange operation must be performed using
86 MP safe mechanisms.
87
88 @param Sem IN: 32-bit unsigned integer
89 OUT: -1
90 @return Original integer
91
92 **/
93 UINT32
94 LockdownSemaphore (
95 IN OUT volatile UINT32 *Sem
96 )
97 {
98 UINT32 Value;
99
100 do {
101 Value = *Sem;
102 } while (InterlockedCompareExchange32 (
103 (UINT32*)Sem,
104 Value, (UINT32)-1
105 ) != Value);
106 return Value;
107 }
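//
// Illustrative note (not part of the original module): LockdownSemaphore()
// latches the arrival counter at (UINT32)-1. Because ReleaseSemaphore() above
// refuses to increment when Value + 1 == 0, a late CPU calling
// ReleaseSemaphore (mSmmMpSyncData->Counter) after lockdown gets 0 back;
// SmiRendezvous() uses that return value to detect that the BSP has already
// closed the check-in window for this SMI.
//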
108
109 /**
110 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
111
112 @param NumberOfAPs Number of APs to wait for.
113
114 **/
115 VOID
116 WaitForAllAPs (
117 IN UINTN NumberOfAPs
118 )
119 {
120 UINTN BspIndex;
121
122 BspIndex = mSmmMpSyncData->BspIndex;
123 while (NumberOfAPs-- > 0) {
124 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
125 }
126 }
127
128 /**
129 Performs an atomic compare exchange operation to release semaphore
130 for each AP.
131
132 **/
133 VOID
134 ReleaseAllAPs (
135 VOID
136 )
137 {
138 UINTN Index;
139
140 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
141 if (IsPresentAp (Index)) {
142 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
143 }
144 }
145 }
146
147 /**
148 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
149
150 @param Exceptions CPU Arrival exception flags.
151
152 @retval TRUE if all CPUs have checked in.
153 @retval FALSE if at least one normal AP hasn't checked in.
154
155 **/
156 BOOLEAN
157 AllCpusInSmmWithExceptions (
158 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
159 )
160 {
161 UINTN Index;
162 SMM_CPU_DATA_BLOCK *CpuData;
163 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
164
165 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
166
167 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
168 return TRUE;
169 }
170
171 CpuData = mSmmMpSyncData->CpuData;
172 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
173 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
174 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
175 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
176 continue;
177 }
178 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
179 continue;
180 }
181 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
182 continue;
183 }
184 return FALSE;
185 }
186 }
187
188
189 return TRUE;
190 }
191
192 /**
193 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
194
195 @retval TRUE The OS has enabled LMCE.
196 @retval FALSE The OS has not enabled LMCE.
197
198 **/
199 BOOLEAN
200 IsLmceOsEnabled (
201 VOID
202 )
203 {
204 MSR_IA32_MCG_CAP_REGISTER McgCap;
205 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
206 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
207
208 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
209 if (McgCap.Bits.MCG_LMCE_P == 0) {
210 return FALSE;
211 }
212
213 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
214 if (FeatureCtrl.Bits.LmceOn == 0) {
215 return FALSE;
216 }
217
218 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
219 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
220 }
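//
// Illustrative note (not part of the original module): LMCE is reported as
// enabled only when all three levels agree, roughly:
//
//   McgCap.Bits.MCG_LMCE_P          // CPU capability
//   && FeatureCtrl.Bits.LmceOn      // firmware opt-in
//   && McgExtCtrl.Bits.LMCE_EN      // OS opt-in
//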
221
222 /**
223 Return whether a local machine check exception has been signaled.
224
225 When set, MCG_STATUS.LMCE_S indicates that the current machine-check event was
226 delivered to only this logical processor.
227
228 @retval TRUE LMCE was signaled.
229 @retval FALSE LMCE was not signaled.
230
231 **/
232 BOOLEAN
233 IsLmceSignaled (
234 VOID
235 )
236 {
237 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
238
239 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
240 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
241 }
242
243 /**
244 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
245 entering SMM, except SMI-disabled APs.
246
247 **/
248 VOID
249 SmmWaitForApArrival (
250 VOID
251 )
252 {
253 UINT64 Timer;
254 UINTN Index;
255 BOOLEAN LmceEn;
256 BOOLEAN LmceSignal;
257
258 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
259
260 LmceEn = FALSE;
261 LmceSignal = FALSE;
262 if (mMachineCheckSupported) {
263 LmceEn = IsLmceOsEnabled ();
264 LmceSignal = IsLmceSignaled();
265 }
266
267 //
268 // Platform implementor should choose a timeout value appropriately:
269 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
270 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
271 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
272 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
273 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
274 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
275 // - The timeout value must be longer than the longest possible IO operation in the system.
276 //
277
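//
// Illustrative note (not part of the original module): the timeout window is
// implemented by StartSyncTimer()/IsSyncTimerTimeout(); in this package it is
// typically configured via the PcdCpuSmmApSyncTimeout PCD (a platform-chosen
// value in microseconds).
//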
278 //
279 // Sync with APs 1st timeout
280 //
281 for (Timer = StartSyncTimer ();
282 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
283 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
284 ) {
285 CpuPause ();
286 }
287
288 //
289 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
290 // because:
291 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
292 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
293 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
294 // work while SMI handling is on-going.
295 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
296 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
297 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
298 // mode work while SMI handling is on-going.
299 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
300 // - In traditional flow, SMI disabling is discouraged.
301 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
302 // In both cases, adding SMI-disabling checking code increases overhead.
303 //
304 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
305 //
306 // Send SMI IPIs to bring outside processors in
307 //
308 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
309 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
310 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
311 }
312 }
313
314 //
315 // Sync with APs 2nd timeout.
316 //
317 for (Timer = StartSyncTimer ();
318 !IsSyncTimerTimeout (Timer) &&
319 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
320 ) {
321 CpuPause ();
322 }
323 }
324
325 return;
326 }
327
328
329 /**
330 Replace OS MTRRs with SMI MTRRs.
331
332 @param CpuIndex Processor Index
333
334 **/
335 VOID
336 ReplaceOSMtrrs (
337 IN UINTN CpuIndex
338 )
339 {
340 SmmCpuFeaturesDisableSmrr ();
341
342 //
343 // Replace all MTRR registers
344 //
345 MtrrSetAllMtrrs (&gSmiMtrrs);
346 }
347
348 /**
349 Check whether the task has been finished by all APs.
350
351 @param BlockMode Whether to check in blocking mode or non-blocking mode.
352
353 @retval TRUE Task has been finished by all APs.
354 @retval FALSE Task has not been finished by all APs.
355
356 **/
357 BOOLEAN
358 WaitForAllAPsNotBusy (
359 IN BOOLEAN BlockMode
360 )
361 {
362 UINTN Index;
363
364 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
365 //
366 // Ignore the BSP and APs which have not checked in to SMM.
367 //
368 if (!IsPresentAp(Index)) {
369 continue;
370 }
371
372 if (BlockMode) {
373 AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
374 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
375 } else {
376 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
377 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
378 } else {
379 return FALSE;
380 }
381 }
382 }
383
384 return TRUE;
385 }
386
387 /**
388 Check whether it is a present AP.
389
390 @param CpuIndex The AP index which calls this function.
391
392 @retval TRUE It's a present AP.
393 @retval FALSE This is not an AP or it is not present.
394
395 **/
396 BOOLEAN
397 IsPresentAp (
398 IN UINTN CpuIndex
399 )
400 {
401 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
402 *(mSmmMpSyncData->CpuData[CpuIndex].Present));
403 }
404
405 /**
406 Clean up the status flags used while executing the procedure.
407
408 @param CpuIndex The AP index which calls this function.
409
410 **/
411 VOID
412 ReleaseToken (
413 IN UINTN CpuIndex
414 )
415 {
416 PROCEDURE_TOKEN *Token;
417
418 Token = mSmmMpSyncData->CpuData[CpuIndex].Token;
419
420 if (InterlockedDecrement (&Token->RunningApCount) == 0) {
421 ReleaseSpinLock (Token->SpinLock);
422 }
423
424 mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
425 }
426
427 /**
428 Reset the tokens in the maintained list.
429
430 **/
431 VOID
432 ResetTokens (
433 VOID
434 )
435 {
436 LIST_ENTRY *Link;
437 PROCEDURE_TOKEN *ProcToken;
438
439 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
440 while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
441 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
442
443 ProcToken->RunningApCount = 0;
444 ProcToken->Used = FALSE;
445
446 //
447 // Check the spinlock status and release it if not released yet.
448 //
449 if (!AcquireSpinLockOrFail(ProcToken->SpinLock)) {
450 DEBUG((DEBUG_ERROR, "Risk::SpinLock still not released!"));
451 }
452 ReleaseSpinLock (ProcToken->SpinLock);
453
454 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
455 }
456 }
457
458 /**
459 SMI handler for BSP.
460
461 @param CpuIndex BSP processor Index
462 @param SyncMode SMM MP sync mode
463
464 **/
465 VOID
466 BSPHandler (
467 IN UINTN CpuIndex,
468 IN SMM_CPU_SYNC_MODE SyncMode
469 )
470 {
471 UINTN Index;
472 MTRR_SETTINGS Mtrrs;
473 UINTN ApCount;
474 BOOLEAN ClearTopLevelSmiResult;
475 UINTN PresentCount;
476
477 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
478 ApCount = 0;
479
480 //
481 // Flag BSP's presence
482 //
483 *mSmmMpSyncData->InsideSmm = TRUE;
484
485 //
486 // Initialize Debug Agent to start source level debug in BSP handler
487 //
488 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
489
490 //
491 // Mark this processor's presence
492 //
493 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
494
495 //
496 // Clear platform top level SMI status bit before calling SMI handlers. If
497 // we cleared it after SMI handlers are run, we would miss the SMI that
498 // occurs after SMI handlers are done and before SMI status bit is cleared.
499 //
500 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
501 ASSERT (ClearTopLevelSmiResult == TRUE);
502
503 //
504 // Set running processor index
505 //
506 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
507
508 //
509 // If Traditional Sync Mode or MTRRs need to be configured: gather all available APs.
510 //
511 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
512
513 //
514 // Wait for APs to arrive
515 //
516 SmmWaitForApArrival();
517
518 //
519 // Lock the counter down and retrieve the number of APs
520 //
521 *mSmmMpSyncData->AllCpusInSync = TRUE;
522 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
523
524 //
525 // Wait for all APs to get ready for programming MTRRs
526 //
527 WaitForAllAPs (ApCount);
528
529 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
530 //
531 // Signal all APs it's time to back up MTRRs
532 //
533 ReleaseAllAPs ();
534
535 //
536 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
537 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
538 // to a large enough value to avoid this situation.
539 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
540 // We do the backup first and then set MTRRs to avoid a race condition for threads
541 // in the same core.
542 //
543 MtrrGetAllMtrrs(&Mtrrs);
544
545 //
546 // Wait for all APs to complete their MTRR saving
547 //
548 WaitForAllAPs (ApCount);
549
550 //
551 // Let all processors program SMM MTRRs together
552 //
553 ReleaseAllAPs ();
554
555 //
556 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
557 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
558 // to a large enough value to avoid this situation.
559 //
560 ReplaceOSMtrrs (CpuIndex);
561
562 //
563 // Wait for all APs to complete their MTRR programming
564 //
565 WaitForAllAPs (ApCount);
566 }
567 }
568
569 //
570 // The BUSY lock is initialized to Acquired state
571 //
572 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
573
574 //
575 // Perform the pre tasks
576 //
577 PerformPreTasks ();
578
579 //
580 // Invoke SMM Foundation EntryPoint with the processor information context.
581 //
582 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
583
584 //
585 // Make sure all APs have completed their pending non-blocking tasks
586 //
587 WaitForAllAPsNotBusy (TRUE);
588
589 //
590 // Perform the remaining tasks
591 //
592 PerformRemainingTasks ();
593
594 //
595 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
596 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
597 // will run through freely.
598 //
599 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
600
601 //
602 // Lock the counter down and retrieve the number of APs
603 //
604 *mSmmMpSyncData->AllCpusInSync = TRUE;
605 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
606 //
607 // Make sure all APs have their Present flag set
608 //
609 while (TRUE) {
610 PresentCount = 0;
611 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
612 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
613 PresentCount ++;
614 }
615 }
616 if (PresentCount > ApCount) {
617 break;
618 }
619 }
620 }
621
622 //
623 // Notify all APs to exit
624 //
625 *mSmmMpSyncData->InsideSmm = FALSE;
626 ReleaseAllAPs ();
627
628 //
629 // Wait for all APs to complete their pending tasks
630 //
631 WaitForAllAPs (ApCount);
632
633 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
634 //
635 // Signal APs to restore MTRRs
636 //
637 ReleaseAllAPs ();
638
639 //
640 // Restore OS MTRRs
641 //
642 SmmCpuFeaturesReenableSmrr ();
643 MtrrSetAllMtrrs(&Mtrrs);
644
645 //
646 // Wait for all APs to complete MTRR programming
647 //
648 WaitForAllAPs (ApCount);
649 }
650
651 //
652 // Stop source level debug in BSP handler, the code below will not be
653 // debugged.
654 //
655 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
656
657 //
658 // Signal APs to Reset states/semaphore for this processor
659 //
660 ReleaseAllAPs ();
661
662 //
663 // Perform pending operations for hot-plug
664 //
665 SmmCpuUpdate ();
666
667 //
668 // Clear the Present flag of BSP
669 //
670 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
671
672 //
673 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
674 // WaitForAllAps does not depend on the Present flag.
675 //
676 WaitForAllAPs (ApCount);
677
678 //
679 // Reset the tokens buffer.
680 //
681 ResetTokens ();
682
683 //
684 // Reset BspIndex to -1, meaning BSP has not been elected.
685 //
686 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
687 mSmmMpSyncData->BspIndex = (UINT32)-1;
688 }
689
690 //
691 // Allow APs to check in from this point on
692 //
693 *mSmmMpSyncData->Counter = 0;
694 *mSmmMpSyncData->AllCpusInSync = FALSE;
695 }
696
697 /**
698 SMI handler for AP.
699
700 @param CpuIndex AP processor Index.
701 @param ValidSmi Indicates whether the current SMI is a valid SMI.
702 @param SyncMode SMM MP sync mode.
703
704 **/
705 VOID
706 APHandler (
707 IN UINTN CpuIndex,
708 IN BOOLEAN ValidSmi,
709 IN SMM_CPU_SYNC_MODE SyncMode
710 )
711 {
712 UINT64 Timer;
713 UINTN BspIndex;
714 MTRR_SETTINGS Mtrrs;
715 EFI_STATUS ProcedureStatus;
716
717 //
718 // Wait for the BSP to enter SMM, with timeout
719 //
720 for (Timer = StartSyncTimer ();
721 !IsSyncTimerTimeout (Timer) &&
722 !(*mSmmMpSyncData->InsideSmm);
723 ) {
724 CpuPause ();
725 }
726
727 if (!(*mSmmMpSyncData->InsideSmm)) {
728 //
729 // BSP timeout in the first round
730 //
731 if (mSmmMpSyncData->BspIndex != -1) {
732 //
733 // BSP Index is known
734 //
735 BspIndex = mSmmMpSyncData->BspIndex;
736 ASSERT (CpuIndex != BspIndex);
737
738 //
739 // Send SMI IPI to bring BSP in
740 //
741 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
742
743 //
744 // Now wait for the BSP for the 2nd time, with timeout
745 //
746 for (Timer = StartSyncTimer ();
747 !IsSyncTimerTimeout (Timer) &&
748 !(*mSmmMpSyncData->InsideSmm);
749 ) {
750 CpuPause ();
751 }
752
753 if (!(*mSmmMpSyncData->InsideSmm)) {
754 //
755 // Give up since BSP is unable to enter SMM
756 // and signal the completion of this AP
757 WaitForSemaphore (mSmmMpSyncData->Counter);
758 return;
759 }
760 } else {
761 //
762 // Don't know BSP index. Give up without sending IPI to BSP.
763 //
764 WaitForSemaphore (mSmmMpSyncData->Counter);
765 return;
766 }
767 }
768
769 //
770 // BSP is available
771 //
772 BspIndex = mSmmMpSyncData->BspIndex;
773 ASSERT (CpuIndex != BspIndex);
774
775 //
776 // Mark this processor's presence
777 //
778 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
779
780 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
781 //
782 // Notify BSP of arrival at this point
783 //
784 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
785 }
786
787 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
788 //
789 // Wait for the signal from BSP to backup MTRRs
790 //
791 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
792
793 //
794 // Backup OS MTRRs
795 //
796 MtrrGetAllMtrrs(&Mtrrs);
797
798 //
799 // Signal BSP the completion of this AP
800 //
801 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
802
803 //
804 // Wait for BSP's signal to program MTRRs
805 //
806 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
807
808 //
809 // Replace OS MTRRs with SMI MTRRs
810 //
811 ReplaceOSMtrrs (CpuIndex);
812
813 //
814 // Signal BSP the completion of this AP
815 //
816 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
817 }
818
819 while (TRUE) {
820 //
821 // Wait for something to happen
822 //
823 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
824
825 //
826 // Check if BSP wants to exit SMM
827 //
828 if (!(*mSmmMpSyncData->InsideSmm)) {
829 break;
830 }
831
832 //
833 // BUSY should be acquired by SmmStartupThisAp()
834 //
835 ASSERT (
836 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
837 );
838
839 //
840 // Invoke the scheduled procedure
841 //
842 ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
843 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
844 );
845 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
846 *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
847 }
848
849 if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
850 ReleaseToken (CpuIndex);
851 }
852
853 //
854 // Release BUSY
855 //
856 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
857 }
858
859 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
860 //
861 // Notify BSP the readiness of this AP to program MTRRs
862 //
863 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
864
865 //
866 // Wait for the signal from BSP to program MTRRs
867 //
868 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
869
870 //
871 // Restore OS MTRRs
872 //
873 SmmCpuFeaturesReenableSmrr ();
874 MtrrSetAllMtrrs(&Mtrrs);
875 }
876
877 //
878 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
879 //
880 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
881
882 //
883 // Wait for the signal from BSP to Reset states/semaphore for this processor
884 //
885 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
886
887 //
888 // Reset states/semaphore for this processor
889 //
890 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
891
892 //
893 // Notify BSP the readiness of this AP to exit SMM
894 //
895 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
896
897 }
898
899 /**
900 Create a 4GB page table in SMRAM.
901
902 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE.
903 @return The page table address.
904
905 **/
906 UINT32
907 Gen4GPageTable (
908 IN BOOLEAN Is32BitPageTable
909 )
910 {
911 VOID *PageTable;
912 UINTN Index;
913 UINT64 *Pte;
914 UINTN PagesNeeded;
915 UINTN Low2MBoundary;
916 UINTN High2MBoundary;
917 UINTN Pages;
918 UINTN GuardPage;
919 UINT64 *Pdpte;
920 UINTN PageIndex;
921 UINTN PageAddress;
922
923 Low2MBoundary = 0;
924 High2MBoundary = 0;
925 PagesNeeded = 0;
926 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
927 //
928 // Add one more page for known good stack, then find the lower 2MB aligned address.
929 //
930 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
931 //
932 // Add two more pages for known good stack and stack guard page,
933 // then find the lower 2MB aligned address.
934 //
935 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
936 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
937 }
938 //
939 // Allocate the page table
940 //
941 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
942 ASSERT (PageTable != NULL);
943
944 PageTable = (VOID *)((UINTN)PageTable);
945 Pte = (UINT64*)PageTable;
946
947 //
948 // Zero out all page table entries first
949 //
950 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
951
952 //
953 // Set Page Directory Pointers
954 //
955 for (Index = 0; Index < 4; Index++) {
956 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
957 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
958 }
959 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
960
961 //
962 // Fill in Page Directory Entries
963 //
964 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
965 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
966 }
967
968 Pdpte = (UINT64*)PageTable;
969 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
970 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
971 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
972 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
973 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
974 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
975 //
976 // Fill in Page Table Entries
977 //
978 Pte = (UINT64*)Pages;
979 PageAddress = PageIndex;
980 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
981 if (PageAddress == GuardPage) {
982 //
983 // Mark the guard page as non-present
984 //
985 Pte[Index] = PageAddress | mAddressEncMask;
986 GuardPage += mSmmStackSize;
987 if (GuardPage > mSmmStackArrayEnd) {
988 GuardPage = 0;
989 }
990 } else {
991 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
992 }
993 PageAddress+= EFI_PAGE_SIZE;
994 }
995 Pages += EFI_PAGE_SIZE;
996 }
997 }
998
999 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
1000 Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1001 if ((Pte[0] & IA32_PG_PS) == 0) {
1002 // 4K-page entries are already mapped. Just hide the first one anyway.
1003 Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1004 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
1005 } else {
1006 // Create 4K-page entries
1007 Pages = (UINTN)AllocatePageTableMemory (1);
1008 ASSERT (Pages != 0);
1009
1010 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
1011
1012 Pte = (UINT64*)Pages;
1013 PageAddress = 0;
1014 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (present bit clear); the rest stay present
1015 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
1016 PageAddress += EFI_PAGE_SIZE;
1017 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
1018 }
1019 }
1020 }
1021
1022 return (UINT32)(UINTN)PageTable;
1023 }
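//
// Illustrative note (not part of the original module): the base allocation of
// 5 pages above maps the low 4GB with 2MB pages: 1 PDPT page holding 4
// entries plus 4 page-directory pages, each with 512 entries of 2MB, so
// 4 * 512 * 2MB = 4GB. The extra PagesNeeded pages are used only when
// PcdCpuSmmStackGuard is set: one 4KB-granular page table per 2MB region
// that contains a stack guard page.
//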
1024
1025 /**
1026 Checks whether the input token is a token currently in use.
1027
1028 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1029 BroadcastProcedure.
1030
1031 @retval TRUE The input token is currently in use.
1032 @retval FALSE The input token is not currently in use.
1033 **/
1034 BOOLEAN
1035 IsTokenInUse (
1036 IN SPIN_LOCK *Token
1037 )
1038 {
1039 LIST_ENTRY *Link;
1040 PROCEDURE_TOKEN *ProcToken;
1041
1042 if (Token == NULL) {
1043 return FALSE;
1044 }
1045
1046 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1047 while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
1048 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1049
1050 if (ProcToken->Used && ProcToken->SpinLock == Token) {
1051 return TRUE;
1052 }
1053
1054 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1055 }
1056
1057 return FALSE;
1058 }
1059
1060 /**
1061 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1062
1063 **/
1064 VOID
1065 AllocateTokenBuffer (
1066 VOID
1067 )
1068 {
1069 UINTN SpinLockSize;
1070 UINT32 TokenCountPerChunk;
1071 UINTN ProcTokenSize;
1072 UINTN Index;
1073 PROCEDURE_TOKEN *ProcToken;
1074 SPIN_LOCK *SpinLock;
1075 UINT8 *SpinLockBuffer;
1076 UINT8 *ProcTokenBuffer;
1077
1078 SpinLockSize = GetSpinLockProperties ();
1079 ProcTokenSize = sizeof (PROCEDURE_TOKEN);
1080
1081 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1082 ASSERT (TokenCountPerChunk != 0);
1083 if (TokenCountPerChunk == 0) {
1084 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1085 CpuDeadLoop ();
1086 }
1087 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
1088
1089 //
1090 // Separate the SPIN_LOCK and PROCEDURE_TOKEN buffers because of the alignment required by SPIN_LOCK.
1091 //
1092 SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
1093 ASSERT (SpinLockBuffer != NULL);
1094
1095 ProcTokenBuffer = AllocatePool (ProcTokenSize * TokenCountPerChunk);
1096 ASSERT (ProcTokenBuffer != NULL);
1097
1098 for (Index = 0; Index < TokenCountPerChunk; Index++) {
1099 SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
1100 InitializeSpinLock (SpinLock);
1101
1102 ProcToken = (PROCEDURE_TOKEN *)(ProcTokenBuffer + ProcTokenSize * Index);
1103 ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
1104 ProcToken->SpinLock = SpinLock;
1105 ProcToken->Used = FALSE;
1106 ProcToken->RunningApCount = 0;
1107
1108 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);
1109 }
1110 }
1111
1112 /**
1113 Find the first free token in the allocated token list.
1114
1115 @return The first free PROCEDURE_TOKEN, or NULL if none is free.
1116
1117 **/
1118 PROCEDURE_TOKEN *
1119 FindFirstFreeToken (
1120 VOID
1121 )
1122 {
1123 LIST_ENTRY *Link;
1124 PROCEDURE_TOKEN *ProcToken;
1125
1126 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1127 while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
1128 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1129
1130 if (!ProcToken->Used) {
1131 return ProcToken;
1132 }
1133
1134 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1135 }
1136
1137 return NULL;
1138 }
1139
1140 /**
1141 Get the free token.
1142
1143 If there is no free token, allocate a new chunk of tokens, then return a free one.
1144
1145 @param RunningApsCount The number of APs that will run the procedure tracked by this token.
1146
1147 @return The first free PROCEDURE_TOKEN.
1148
1149 **/
1150 PROCEDURE_TOKEN *
1151 GetFreeToken (
1152 IN UINT32 RunningApsCount
1153 )
1154 {
1155 PROCEDURE_TOKEN *NewToken;
1156
1157 NewToken = FindFirstFreeToken ();
1158 if (NewToken == NULL) {
1159 AllocateTokenBuffer ();
1160 NewToken = FindFirstFreeToken ();
1161 }
1162 ASSERT (NewToken != NULL);
1163
1164 NewToken->Used = TRUE;
1165 NewToken->RunningApCount = RunningApsCount;
1166 AcquireSpinLock (NewToken->SpinLock);
1167
1168 return NewToken;
1169 }
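//
// Illustrative note (not part of the original module): a token's SpinLock is
// acquired here when the token is handed out and is released by the last AP
// in ReleaseToken() when RunningApCount reaches zero. IsApReady() below can
// therefore poll for completion with AcquireSpinLockOrFail().
//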
1170
1171 /**
1172 Checks status of specified AP.
1173
1174 This function checks whether the specified AP has finished the task assigned
1175 by StartupThisAP(), and whether the timeout has expired.
1176
1177 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1178 BroadcastProcedure.
1179
1180 @retval EFI_SUCCESS Specified AP has finished the task assigned by StartupThisAP().
1181 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1182 **/
1183 EFI_STATUS
1184 IsApReady (
1185 IN SPIN_LOCK *Token
1186 )
1187 {
1188 if (AcquireSpinLockOrFail (Token)) {
1189 ReleaseSpinLock (Token);
1190 return EFI_SUCCESS;
1191 }
1192
1193 return EFI_NOT_READY;
1194 }
1195
1196 /**
1197 Schedule a procedure to run on the specified CPU.
1198
1199 @param[in] Procedure The address of the procedure to run
1200 @param[in] CpuIndex Target CPU Index
1201 @param[in,out] ProcArguments The parameter to pass to the procedure
1202 @param[in] Token This is an optional parameter that allows the caller to execute the
1203 procedure in a blocking or non-blocking fashion. If it is NULL the
1204 call is blocking, and the call will not return until the AP has
1205 completed the procedure. If the token is not NULL, the call will
1206 return immediately. The caller can check whether the procedure has
1207 completed with CheckOnProcedure or WaitForProcedure.
1208 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1209 execution of Procedure, either for blocking or non-blocking mode.
1210 Zero means infinity. If the timeout expires before all APs return
1211 from Procedure, then Procedure on the failed APs is terminated. If
1212 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1213 If the timeout expires in non-blocking mode, the timeout can be
1214 determined through CheckOnProcedure or WaitForProcedure.
1215 Note that timeout support is optional. Whether an implementation
1216 supports this feature can be determined via the Attributes data
1217 member.
1218 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1219 by Procedure when it completes execution on the target AP, or with
1220 EFI_TIMEOUT if the Procedure fails to complete within the optional
1221 timeout. The implementation will update this variable with
1222 EFI_NOT_READY prior to starting Procedure on the target AP.
1223
1224 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
1225 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
1226 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
1227 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
1228 @retval EFI_SUCCESS The procedure has been successfully scheduled
1229
1230 **/
1231 EFI_STATUS
1232 InternalSmmStartupThisAp (
1233 IN EFI_AP_PROCEDURE2 Procedure,
1234 IN UINTN CpuIndex,
1235 IN OUT VOID *ProcArguments OPTIONAL,
1236 IN MM_COMPLETION *Token,
1237 IN UINTN TimeoutInMicroseconds,
1238 IN OUT EFI_STATUS *CpuStatus
1239 )
1240 {
1241 PROCEDURE_TOKEN *ProcToken;
1242
1243 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
1244 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
1245 return EFI_INVALID_PARAMETER;
1246 }
1247 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1248 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
1249 return EFI_INVALID_PARAMETER;
1250 }
1251 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
1252 return EFI_INVALID_PARAMETER;
1253 }
1254 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
1255 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
1256 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
1257 }
1258 return EFI_INVALID_PARAMETER;
1259 }
1260 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
1261 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
1262 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
1263 }
1264 return EFI_INVALID_PARAMETER;
1265 }
1266 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1267 return EFI_INVALID_PARAMETER;
1268 }
1269 if (Procedure == NULL) {
1270 return EFI_INVALID_PARAMETER;
1271 }
1272
1273 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1274
1275 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
1276 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
1277 if (Token != NULL) {
1278 ProcToken= GetFreeToken (1);
1279 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
1280 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1281 }
1282 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
1283 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
1284 *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
1285 }
1286
1287 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
1288
1289 if (Token == NULL) {
1290 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1291 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1292 }
1293
1294 return EFI_SUCCESS;
1295 }
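//
// Illustrative note (not part of the original module): the blocking path for
// Token == NULL works because Busy is already held (acquired above, before
// Run is signaled). The second AcquireSpinLock() therefore spins until
// APHandler() releases Busy once the procedure returns, after which the
// lock is immediately released again.
//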
1296
1297 /**
1298 Worker function to execute a caller provided function on all enabled APs.
1299
1300 @param[in] Procedure A pointer to the function to be run on
1301 enabled APs of the system.
1302 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1303 APs to return from Procedure, either for
1304 blocking or non-blocking mode.
1305 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1306 all APs.
1307 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1308 procedure in a blocking or non-blocking fashion. If it is NULL the
1309 call is blocking, and the call will not return until the AP has
1310 completed the procedure. If the token is not NULL, the call will
1311 return immediately. The caller can check whether the procedure has
1312 completed with CheckOnProcedure or WaitForProcedure.
1313 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1314 by Procedure when it completes execution on the target AP, or with
1315 EFI_TIMEOUT if the Procedure fails to complete within the optional
1316 timeout. The implementation will update this variable with
1317 EFI_NOT_READY prior to starting Procedure on the target AP.
1318
1319
1320 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1321 the timeout expired.
1322 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1323 to all enabled APs.
1324 @retval others Failed to Startup all APs.
1325
1326 **/
1327 EFI_STATUS
1328 InternalSmmStartupAllAPs (
1329 IN EFI_AP_PROCEDURE2 Procedure,
1330 IN UINTN TimeoutInMicroseconds,
1331 IN OUT VOID *ProcedureArguments OPTIONAL,
1332 IN OUT MM_COMPLETION *Token,
1333 IN OUT EFI_STATUS *CPUStatus
1334 )
1335 {
1336 UINTN Index;
1337 UINTN CpuCount;
1338 PROCEDURE_TOKEN *ProcToken;
1339
1340 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1341 return EFI_INVALID_PARAMETER;
1342 }
1343 if (Procedure == NULL) {
1344 return EFI_INVALID_PARAMETER;
1345 }
1346
1347 CpuCount = 0;
1348 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1349 if (IsPresentAp (Index)) {
1350 CpuCount ++;
1351
1352 if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
1353 return EFI_INVALID_PARAMETER;
1354 }
1355
1356 if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
1357 return EFI_NOT_READY;
1358 }
1359 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1360 }
1361 }
1362 if (CpuCount == 0) {
1363 return EFI_NOT_STARTED;
1364 }
1365
1366 if (Token != NULL) {
1367 ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
1368 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1369 } else {
1370 ProcToken = NULL;
1371 }
1372
1373 //
1374 // Make sure all BUSY locks are acquired.
1375 //
1376 // Because the former code already checked mSmmMpSyncData->CpuData[***].Busy for each AP,
1377 // the code here always uses AcquireSpinLock instead of AcquireSpinLockOrFail, even in
1378 // non-blocking mode.
1379 //
1380 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1381 if (IsPresentAp (Index)) {
1382 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1383 }
1384 }
1385
1386 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1387 if (IsPresentAp (Index)) {
1388 mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
1389 mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
1390 if (ProcToken != NULL) {
1391 mSmmMpSyncData->CpuData[Index].Token = ProcToken;
1392 }
1393 if (CPUStatus != NULL) {
1394 mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
1395 if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
1396 *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
1397 }
1398 }
1399 } else {
1400 //
1401 // PI spec requirement:
1402 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
1403 //
1404 if (CPUStatus != NULL) {
1405 CPUStatus[Index] = EFI_NOT_STARTED;
1406 }
1407
1408 //
1409 // Decrease the count to mark this processor (AP or BSP) as finished.
1410 //
1411 if (ProcToken != NULL) {
1412 WaitForSemaphore (&ProcToken->RunningApCount);
1413 }
1414 }
1415 }
1416
1417 ReleaseAllAPs ();
1418
1419 if (Token == NULL) {
1420 //
1421 // Make sure all APs have completed their tasks.
1422 //
1423 WaitForAllAPsNotBusy (TRUE);
1424 }
1425
1426 return EFI_SUCCESS;
1427 }
1428
1429 /**
1430 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1431 If the function is defined with a type that is not compatible with
1432 the type (of the expression) pointed to by the expression that
1433 denotes the called function, the behavior is undefined.
1434
1435 So add below wrapper function to convert between EFI_AP_PROCEDURE
1436 and EFI_AP_PROCEDURE2.
1437
1438 Wrapper for Procedures.
1439
1440 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1441
1442 **/
1443 EFI_STATUS
1444 EFIAPI
1445 ProcedureWrapper (
1446 IN VOID *Buffer
1447 )
1448 {
1449 PROCEDURE_WRAPPER *Wrapper;
1450
1451 Wrapper = Buffer;
1452 Wrapper->Procedure (Wrapper->ProcedureArgument);
1453
1454 return EFI_SUCCESS;
1455 }
1456
1457 /**
1458 Schedule a procedure to run on the specified CPU in blocking mode.
1459
1460 @param[in] Procedure The address of the procedure to run
1461 @param[in] CpuIndex Target CPU Index
1462 @param[in, out] ProcArguments The parameter to pass to the procedure
1463
1464 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
1465 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
1466 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
1467 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
1468 @retval EFI_SUCCESS The procedure has been successfully scheduled
1469
1470 **/
1471 EFI_STATUS
1472 EFIAPI
1473 SmmBlockingStartupThisAp (
1474 IN EFI_AP_PROCEDURE Procedure,
1475 IN UINTN CpuIndex,
1476 IN OUT VOID *ProcArguments OPTIONAL
1477 )
1478 {
1479 PROCEDURE_WRAPPER Wrapper;
1480
1481 Wrapper.Procedure = Procedure;
1482 Wrapper.ProcedureArgument = ProcArguments;
1483
1484 //
1485 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1486 //
1487 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1488 }
1489
1490 /**
1491 Schedule a procedure to run on the specified CPU.
1492
1493 @param Procedure The address of the procedure to run
1494 @param CpuIndex Target CPU Index
1495 @param ProcArguments The parameter to pass to the procedure
1496
1497 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
1498 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
1499 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
1500 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
1501 @retval EFI_SUCCESS The procedure has been successfully scheduled
1502
1503 **/
1504 EFI_STATUS
1505 EFIAPI
1506 SmmStartupThisAp (
1507 IN EFI_AP_PROCEDURE Procedure,
1508 IN UINTN CpuIndex,
1509 IN OUT VOID *ProcArguments OPTIONAL
1510 )
1511 {
1512 MM_COMPLETION Token;
1513
1514 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1515 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1516
1517 //
1518 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1519 //
1520 return InternalSmmStartupThisAp (
1521 ProcedureWrapper,
1522 CpuIndex,
1523 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1524 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
1525 0,
1526 NULL
1527 );
1528 }
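//
// Illustrative usage sketch (not part of the original module; MyApProcedure,
// ApIndex and MyContext are hypothetical). Drivers typically reach this
// service through the SMM System Table:
//
//   EFI_STATUS Status;
//   Status = gSmst->SmmStartupThisAp (MyApProcedure, ApIndex, &MyContext);
//   if (EFI_ERROR (Status)) {
//     // ApIndex was out of range, named the BSP, or the AP was absent/busy.
//   }
//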
1529
1530 /**
1531 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1532 This is useful when you want to enable hardware breakpoints in SMM without entering SMM mode.
1533
1534 NOTE: It might not be appropriate at runtime since it might
1535 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1536
1537 @param CpuIndex CPU Index
1538
1539 **/
1540 VOID
1541 EFIAPI
1542 CpuSmmDebugEntry (
1543 IN UINTN CpuIndex
1544 )
1545 {
1546 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1547
1548 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1549 ASSERT(CpuIndex < mMaxNumberOfCpus);
1550 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1551 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1552 AsmWriteDr6 (CpuSaveState->x86._DR6);
1553 AsmWriteDr7 (CpuSaveState->x86._DR7);
1554 } else {
1555 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1556 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1557 }
1558 }
1559 }
1560
1561 /**
1562 This function restores DR6 & DR7 to SMM save state.
1563
1564 NOTE: It might not be appropriate at runtime since it might
1565 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1566
1567 @param CpuIndex CPU Index
1568
1569 **/
1570 VOID
1571 EFIAPI
1572 CpuSmmDebugExit (
1573 IN UINTN CpuIndex
1574 )
1575 {
1576 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1577
1578 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1579 ASSERT(CpuIndex < mMaxNumberOfCpus);
1580 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1581 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1582 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1583 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1584 } else {
1585 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1586 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1587 }
1588 }
1589 }
1590
1591 /**
1592 C function for SMI entry, each processor comes here upon SMI trigger.
1593
1594 @param CpuIndex CPU Index
1595
1596 **/
1597 VOID
1598 EFIAPI
1599 SmiRendezvous (
1600 IN UINTN CpuIndex
1601 )
1602 {
1603 EFI_STATUS Status;
1604 BOOLEAN ValidSmi;
1605 BOOLEAN IsBsp;
1606 BOOLEAN BspInProgress;
1607 UINTN Index;
1608 UINTN Cr2;
1609
1610 ASSERT(CpuIndex < mMaxNumberOfCpus);
1611
1612 //
1613 // Save Cr2 because a Page Fault exception in SMM may override its value
1614 // when on-demand paging is used for memory above 4G.
1615 //
1616 Cr2 = 0;
1617 SaveCr2 (&Cr2);
1618
1619 //
1620 // Call the user-registered startup function first.
1621 //
1622 if (mSmmMpSyncData->StartupProcedure != NULL) {
1623 mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
1624 }
1625
1626 //
1627 // Perform CPU specific entry hooks
1628 //
1629 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1630
1631 //
1632 // Determine if this is a valid SMI
1633 //
1634 ValidSmi = PlatformValidSmi();
1635
1636 //
1637 // Determine if the BSP is already in progress. Note this must be checked after
1638 // ValidSmi because the BSP may clear a valid SMI source after checking in.
1639 //
1640 BspInProgress = *mSmmMpSyncData->InsideSmm;
1641
1642 if (!BspInProgress && !ValidSmi) {
1643 //
1644 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1645 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1646 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1647 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1648 // is nothing we need to do.
1649 //
1650 goto Exit;
1651 } else {
1652 //
1653 // Signal presence of this processor
1654 //
1655 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1656 //
1657 // BSP has already ended the synchronization, so QUIT!!!
1658 //
1659
1660 //
1661 // Wait for BSP's signal to finish SMI
1662 //
1663 while (*mSmmMpSyncData->AllCpusInSync) {
1664 CpuPause ();
1665 }
1666 goto Exit;
1667 } else {
1668
1669 //
1670 // The BUSY lock is initialized to Released state.
1671 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1672 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1673 // after AP's present flag is detected.
1674 //
1675 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1676 }
1677
1678 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1679 ActivateSmmProfile (CpuIndex);
1680 }
1681
1682 if (BspInProgress) {
1683 //
1684 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1685 // as BSP may have cleared the SMI status
1686 //
1687 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1688 } else {
1689 //
1690 // We have a valid SMI
1691 //
1692
1693 //
1694 // Elect BSP
1695 //
1696 IsBsp = FALSE;
1697 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1698 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1699 //
1700 // Call platform hook to do BSP election
1701 //
1702 Status = PlatformSmmBspElection (&IsBsp);
1703 if (EFI_SUCCESS == Status) {
1704 //
1705 // Platform hook determined the BSP successfully
1706 //
1707 if (IsBsp) {
1708 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1709 }
1710 } else {
1711 //
1712 // Platform hook failed to determine the BSP; use the default election method
1713 //
1714 InterlockedCompareExchange32 (
1715 (UINT32*)&mSmmMpSyncData->BspIndex,
1716 (UINT32)-1,
1717 (UINT32)CpuIndex
1718 );
1719 }
1720 }
1721 }
1722
1723 //
1724 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1725 //
1726 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1727
1728 //
1729 // Clear last request for SwitchBsp.
1730 //
1731 if (mSmmMpSyncData->SwitchBsp) {
1732 mSmmMpSyncData->SwitchBsp = FALSE;
1733 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1734 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1735 }
1736 }
1737
1738 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1739 SmmProfileRecordSmiNum ();
1740 }
1741
1742 //
1743 // BSP Handler is always called with a ValidSmi == TRUE
1744 //
1745 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1746 } else {
1747 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1748 }
1749 }
1750
1751 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1752
1753 //
1754 // Wait for BSP's signal to exit SMI
1755 //
1756 while (*mSmmMpSyncData->AllCpusInSync) {
1757 CpuPause ();
1758 }
1759 }
1760
1761 Exit:
1762 SmmCpuFeaturesRendezvousExit (CpuIndex);
1763
1764 //
1765 // Restore Cr2
1766 //
1767 RestoreCr2 (Cr2);
1768 }
1769
1770 /**
1771 Allocate buffers for the wrapper functions and the SPIN_LOCK/PROCEDURE_TOKEN pool.
1772
1773 **/
1774 VOID
1775 InitializeDataForMmMp (
1776 VOID
1777 )
1778 {
1779 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1780 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1781
1782 InitializeListHead (&gSmmCpuPrivate->TokenList);
1783
1784 AllocateTokenBuffer ();
1785 }
1786
1787 /**
1788 Allocate buffer for all semaphores and spin locks.
1789
1790 **/
1791 VOID
1792 InitializeSmmCpuSemaphores (
1793 VOID
1794 )
1795 {
1796 UINTN ProcessorCount;
1797 UINTN TotalSize;
1798 UINTN GlobalSemaphoresSize;
1799 UINTN CpuSemaphoresSize;
1800 UINTN SemaphoreSize;
1801 UINTN Pages;
1802 UINTN *SemaphoreBlock;
1803 UINTN SemaphoreAddr;
1804
1805 SemaphoreSize = GetSpinLockProperties ();
1806 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1807 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1808 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1809 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1810 DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1811 DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1812 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1813 SemaphoreBlock = AllocatePages (Pages);
1814 ASSERT (SemaphoreBlock != NULL);
1815 ZeroMem (SemaphoreBlock, TotalSize);
1816
1817 SemaphoreAddr = (UINTN)SemaphoreBlock;
1818 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1819 SemaphoreAddr += SemaphoreSize;
1820 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1821 SemaphoreAddr += SemaphoreSize;
1822 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1823 SemaphoreAddr += SemaphoreSize;
1824 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1825 SemaphoreAddr += SemaphoreSize;
1826 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1827 = (SPIN_LOCK *)SemaphoreAddr;
1828 SemaphoreAddr += SemaphoreSize;
1829
1830 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1831 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1832 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1833 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1834 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1835 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1836
1837 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1838 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1839
1840 mSemaphoreSize = SemaphoreSize;
1841 }
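//
// Illustrative note (not part of the original module): every semaphore and
// lock above sits in its own SemaphoreSize-aligned slot, where SemaphoreSize
// comes from GetSpinLockProperties() and is normally a cache-line multiple.
// Two CPUs spinning on different semaphores therefore never share a cache
// line. For example, with SemaphoreSize == 64 and 4 CPUs, the per-CPU Run
// counters land at offsets 0, 64, 128 and 192 within their sub-block.
//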
1842
1843 /**
1844 Initialize uncacheable data.
1845
1846 **/
1847 VOID
1848 EFIAPI
1849 InitializeMpSyncData (
1850 VOID
1851 )
1852 {
1853 UINTN CpuIndex;
1854
1855 if (mSmmMpSyncData != NULL) {
1856 //
1857 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1858 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1859 //
1860 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1861 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1862 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1863 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1864 //
1865 // Enable BSP election by setting BspIndex to -1
1866 //
1867 mSmmMpSyncData->BspIndex = (UINT32)-1;
1868 }
1869 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1870
1871 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1872 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1873 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1874 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1875 mSmmMpSyncData->AllCpusInSync != NULL);
1876 *mSmmMpSyncData->Counter = 0;
1877 *mSmmMpSyncData->InsideSmm = FALSE;
1878 *mSmmMpSyncData->AllCpusInSync = FALSE;
1879
1880 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1881 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1882 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1883 mSmmMpSyncData->CpuData[CpuIndex].Run =
1884 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1885 mSmmMpSyncData->CpuData[CpuIndex].Present =
1886 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1887 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1888 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1889 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1890 }
1891 }
1892 }
1893
1894 /**
1895 Initialize global data for MP synchronization.
1896
1897 @param Stacks Base address of SMI stack buffer for all processors.
1898 @param StackSize Stack size for each processor in SMM.
1899 @param ShadowStackSize Shadow Stack size for each processor in SMM.
1900 @return The CR3 value (page table base address) to be used in SMM.
1901 **/
1902 UINT32
1903 InitializeMpServiceData (
1904 IN VOID *Stacks,
1905 IN UINTN StackSize,
1906 IN UINTN ShadowStackSize
1907 )
1908 {
1909 UINT32 Cr3;
1910 UINTN Index;
1911 UINT8 *GdtTssTables;
1912 UINTN GdtTableStepSize;
1913 CPUID_VERSION_INFO_EDX RegEdx;
1914
1915 //
1916 // Determine if this CPU supports machine check
1917 //
1918 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1919 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1920
1921 //
1922 // Allocate memory for all locks and semaphores
1923 //
1924 InitializeSmmCpuSemaphores ();
1925
1926 //
1927 // Initialize mSmmMpSyncData
1928 //
1929 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1930 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1931 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1932 ASSERT (mSmmMpSyncData != NULL);
1933 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1934 InitializeMpSyncData ();
1935
1936 //
1937 // Initialize physical address mask
1938 // NOTE: Physical memory above virtual address limit is not supported !!!
1939 //
1940 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1941 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1942 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
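//
// Illustrative note (not part of the original module): with CPUID.80000008h
// reporting, say, 39 physical address bits, the first line yields a mask of
// 0x7FFFFFFFFF; the second line then clamps it to 48 bits and clears the
// low 12 bits, leaving a page-aligned physical address mask.
//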
1943
1944 //
1945 // Create page tables
1946 //
1947 Cr3 = SmmInitPageTable ();
1948
1949 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1950
1951 //
1952 // Install SMI handler for each CPU
1953 //
1954 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1955 InstallSmiHandler (
1956 Index,
1957 (UINT32)mCpuHotPlugData.SmBase[Index],
1958 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
1959 StackSize,
1960 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1961 gcSmiGdtr.Limit + 1,
1962 gcSmiIdtr.Base,
1963 gcSmiIdtr.Limit + 1,
1964 Cr3
1965 );
1966 }
1967
1968 //
1969 // Record current MTRR settings
1970 //
1971 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1972 MtrrGetAllMtrrs (&gSmiMtrrs);
1973
1974 return Cr3;
1975 }
1976
1977 /**
1978
1979 Register the SMM Foundation entry point.
1980
1981 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1982 @param SmmEntryPoint SMM Foundation EntryPoint
1983
1984 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
1985
1986 **/
1987 EFI_STATUS
1988 EFIAPI
1989 RegisterSmmEntry (
1990 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1991 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1992 )
1993 {
1994 //
1995 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1996 //
1997 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1998 return EFI_SUCCESS;
1999 }
2000
2001 /**
2002
2003 Register a startup procedure, which will be invoked at the beginning of each SMI entry.
2004
2005 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2006 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2007 with the related definitions of
2008 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2009 The caller may pass a value of NULL to deregister any existing
2010 startup procedure.
2011 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2012 run by the AP. It is an optional common mailbox between APs and
2013 the caller to share information.
2014
2015 @retval EFI_SUCCESS The Procedure has been set successfully.
2016 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2017
2018 **/
2019 EFI_STATUS
2020 RegisterStartupProcedure (
2021 IN EFI_AP_PROCEDURE Procedure,
2022 IN OUT VOID *ProcedureArguments OPTIONAL
2023 )
2024 {
2025 if (Procedure == NULL && ProcedureArguments != NULL) {
2026 return EFI_INVALID_PARAMETER;
2027 }
2028 if (mSmmMpSyncData == NULL) {
2029 return EFI_NOT_READY;
2030 }
2031
2032 mSmmMpSyncData->StartupProcedure = Procedure;
2033 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2034
2035 return EFI_SUCCESS;
2036 }