/** @file
SMM MP service implementation

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                 gSmiMtrrs;
UINT64                        gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA   *mSmmMpSyncData = NULL;
UINTN                         mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES            mSmmCpuSemaphores;
UINTN                         mSemaphoreSize;
SPIN_LOCK                     *mPFLock = NULL;
SMM_CPU_SYNC_MODE             mCpuSmmSyncMode;
BOOLEAN                       mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to acquire the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

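  //
  // Spin until the semaphore is non-zero and the decrement succeeds, so the
  // count never underflows below zero.
  //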
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

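  //
  // Spin until the increment succeeds. If the semaphore has been locked down
  // (value is (UINT32)-1), Value + 1 == 0 and the loop exits without writing,
  // so a locked-down semaphore is never released here.
  //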
  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock down the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

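  //
  // Swap in (UINT32)-1 atomically. Once locked down, ReleaseSemaphore ()
  // refuses to increment the value and returns 0, which late-arriving CPUs
  // use to detect that the synchronization window has closed.
  //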
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

/**
  Wait for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param   NumberOfAPs      The number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE   All CPUs have checked in.
  @retval   FALSE  At least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Has the OS enabled LMCE in MSR_IA32_MCG_EXT_CTL?

  @retval TRUE     The OS has enabled LMCE.
  @retval FALSE    The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether the Local Machine Check Exception (LMCE) was signaled.

  Indicates (when set) that a local machine check exception was generated,
  meaning that the current machine-check event was delivered to only this
  logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex    Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN                        BlockMode
  )
{
  UINTN  Index;

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    //
    // Ignore BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during execution of the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Reset the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    ProcToken->RunningApCount = 0;
    ProcToken->Used = FALSE;

    //
    // Check the spinlock status and release it if not released yet.
    //
    if (!AcquireSpinLockOrFail (ProcToken->SpinLock)) {
      DEBUG ((DEBUG_ERROR, "Risk::SpinLock still not released!"));
    }
    ReleaseSpinLock (ProcToken->SpinLock);

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
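    // Note that PresentCount below also counts the BSP, so the wait completes
    // only when it exceeds ApCount, the number of APs that checked in before
    // the counter was locked down.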
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

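  //
  // Layout of the allocation: page 0 holds the 4 PDPTEs; pages 1-4 are the
  // page directories mapping 0-4GB with 2MB pages; the remaining PagesNeeded
  // pages become 4KB page tables used to mark stack guard pages non-present.
  //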
  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but keep the rest present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Checks whether the input token is a token currently in use.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is currently in use.
  @retval FALSE          The input token is not currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->Used && ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.

**/
VOID
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            ProcTokenSize;
  UINTN            Index;
  PROCEDURE_TOKEN  *ProcToken;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  UINT8            *ProcTokenBuffer;

  SpinLockSize = GetSpinLockProperties ();
  ProcTokenSize = sizeof (PROCEDURE_TOKEN);

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Allocate the SPIN_LOCK and PROCEDURE_TOKEN buffers separately because of
  // the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokenBuffer = AllocatePool (ProcTokenSize * TokenCountPerChunk);
  ASSERT (ProcTokenBuffer != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcToken = (PROCEDURE_TOKEN *)(ProcTokenBuffer + ProcTokenSize * Index);
    ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
    ProcToken->SpinLock = SpinLock;
    ProcToken->Used = FALSE;
    ProcToken->RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);
  }
}

/**
  Find the first free token in the allocated token list.

  @return The first free PROCEDURE_TOKEN, or NULL if none is free.

**/
PROCEDURE_TOKEN *
FindFirstFreeToken (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (!ProcToken->Used) {
      return ProcToken;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return NULL;
}

/**
  Get a free token.

  If there is no free token, allocate a new chunk of tokens and then return
  a free one.

  @param RunningApsCount  The number of APs that will run the procedure.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  NewToken = FindFirstFreeToken ();
  if (NewToken == NULL) {
    AllocateTokenBuffer ();
    NewToken = FindFirstFreeToken ();
  }
  ASSERT (NewToken != NULL);

  NewToken->Used = TRUE;
  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAP().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            detected through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2              Procedure,
  IN      UINTN                          CpuIndex,
  IN OUT  VOID                           *ProcArguments OPTIONAL,
  IN      MM_COMPLETION                  *Token,
  IN      UINTN                          TimeoutInMicroseconds,
  IN OUT  EFI_STATUS                     *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

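  //
  // Acquire the target AP's Busy lock. APHandler () releases it after the
  // procedure completes, which is what the blocking path below waits on.
  //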
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked each present AP's Busy lock,
  // AcquireSpinLock (instead of AcquireSpinLockOrFail) is used here, even in
  // non-blocking mode.
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add the wrapper function below to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer        Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  This is useful when you want to enable hardware breakpoints in SMM without entering
  SMM mode first.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered startup procedure first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

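  //
  // Carve the block into SemaphoreSize-byte slots, one per flag, so that
  // flags spun on by different CPUs land in separately aligned slots.
  //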
  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value of the page table created in SMRAM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                  Cr3;
  UINTN                   Index;
  UINT8                   *GdtTssTables;
  UINTN                   GdtTableStepSize;
  CPUID_VERSION_INFO_EDX  RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
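  //
  // Clamp the mask to 48 address bits and clear bits 0-11 so that only the
  // page-frame portion of a physical address is selected.
  //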
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure to be run on every processor upon SMI entry.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                       with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information.

  @retval EFI_SUCCESS             The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER   The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY           The MP synchronization data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}