/** @file
SMM MP service implementation

Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
BOOLEAN                                     mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

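//
// Illustrative sketch (not part of the driver): how the three primitives above
// combine into the counting-semaphore protocol used throughout this file.
// "Counter" is a hypothetical name standing in for mSmmMpSyncData->Counter.
//
//   volatile UINT32  Counter = 0;          // check-in counter, starts empty
//
//   //
//   // Each arriving CPU checks in by incrementing the counter.
//   //
//   ReleaseSemaphore (&Counter);           // Counter: N -> N + 1
//
//   //
//   // The BSP freezes further check-ins; the pre-lock value tells it how many
//   // CPUs arrived. After lockdown the counter is (UINT32)-1, so a late
//   // ReleaseSemaphore () fails the "Value + 1 != 0" test and returns 0
//   // without modifying the counter - that is how late CPUs detect lockdown.
//   //
//   UINT32  Arrived = LockdownSemaphore (&Counter);
//
//   //
//   // A consumer blocks until the counter is non-zero, then decrements it.
//   //
//   WaitForSemaphore (&Counter);           // Counter: N -> N - 1, for N > 0
//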
/**
  Wait for all APs to perform an atomic compare exchange operation to release the semaphore.

  @param   NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

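//
// Illustrative sketch (not part of the driver): the BSP/AP rendezvous built
// from the per-CPU Run semaphores. The BSP's own Run semaphore is used by APs
// to report completion; each AP's Run semaphore is used by the BSP to release
// that AP to its next step.
//
//   // BSP side                        // AP side (CpuIndex)
//   ReleaseAllAPs ();                  WaitForSemaphore (
//   //  release every present AP         mSmmMpSyncData->CpuData[CpuIndex].Run);
//                                      ... perform the step ...
//   WaitForAllAPs (ApCount);           ReleaseSemaphore (
//   //  block until every AP reports     mSmmMpSyncData->CpuData[BspIndex].Run);
//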
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE     OS has enabled LMCE.
  @retval FALSE    OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  The MCG_STATUS LMCE_S bit indicates (when set) that a local machine check
  exception was generated, i.e. the current machine-check event was delivered
  only to this logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when this
  function returns, no AP will execute normal-mode code before entering SMM, except
  for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;
  BOOLEAN                           LmceEn;
  BOOLEAN                           LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex     Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking mode or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN                        BlockMode
  )
{
  UINTN                             Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during execution of the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN                         *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Reset the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  LIST_ENTRY            *Link;
  PROCEDURE_TOKEN       *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    ProcToken->RunningApCount = 0;

    //
    // Check the spinlock status and release it if not released yet.
    //
    if (!AcquireSpinLockOrFail (ProcToken->SpinLock)) {
      DEBUG ((DEBUG_ERROR, "Risk::SpinLock still not released!"));
    }
    ReleaseSpinLock (ProcToken->SpinLock);

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backing up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;
  EFI_STATUS                        ProcedureStatus;

  //
  // Wait for the BSP, with a timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (not present); the remaining entries stay present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

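//
// Illustrative sketch (not part of the driver): layout of the identity-mapped
// 4 GiB page table built above, out of the 5 + PagesNeeded pages allocated:
//
//   Page 0:      PDPT with 4 entries, one per 1 GiB region
//   Pages 1..4:  page directories, 512 x 2 MiB PDEs each (IA32_PG_PS set);
//                PDE[n] maps physical address n * 2 MiB one-to-one
//   Pages 5..:   4 KiB-granular page tables that replace the 2 MiB PDEs
//                covering the SMM stack range, so each per-CPU guard page
//                can be marked non-present (PcdCpuSmmStackGuard only)
//
// The returned value is the physical address of the PDPT, suitable for CR3.
//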
/**
  Checks whether the input token is a token currently in use.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is currently in use.
  @retval FALSE          The input token is not currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK           *Token
  )
{
  LIST_ENTRY        *Link;
  PROCEDURE_TOKEN   *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN               SpinLockSize;
  UINT32              TokenCountPerChunk;
  UINTN               Index;
  SPIN_LOCK           *SpinLock;
  UINT8               *SpinLockBuffer;
  PROCEDURE_TOKEN     *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Keep the SPIN_LOCK and PROCEDURE_TOKEN buffers separate because of the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get the free token.

  If there is no free token, allocate new tokens and then return the free one.

  @param RunningApsCount    The number of running APs for this token.

  @return    The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32       RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }
  NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}

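//
// Illustrative sketch (not part of the driver): the lifecycle of a procedure
// token. The token's spin lock doubles as the completion flag.
//
//   ProcToken = GetFreeToken (1);        // lock acquired, RunningApCount = 1
//   ... dispatch the procedure to the AP(s) ...
//   //
//   // On each AP, after the procedure returns, ReleaseToken () decrements
//   // RunningApCount; the last AP to finish releases the spin lock.
//   //
//   // Callers poll for completion by probing the lock, as IsApReady () below:
//   //   lock free -> procedure done   (EFI_SUCCESS)
//   //   lock held -> still running    (EFI_NOT_READY)
//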
/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token             This parameter describes the token that was passed into DispatchProcedure or
                                BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAP().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK          *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2              Procedure,
  IN      UINTN                          CpuIndex,
  IN OUT  VOID                           *ProcArguments OPTIONAL,
  IN      MM_COMPLETION                  *Token,
  IN      UINTN                          TimeoutInMicroseconds,
  IN OUT  EFI_STATUS                     *CpuStatus
  )
{
  PROCEDURE_TOKEN    *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

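//
// Illustrative sketch (not part of the driver): non-blocking dispatch from an
// SMI handler running on the BSP. MyProcedure, MyArg, ApIndex, and ApStatus
// are hypothetical names.
//
//   MM_COMPLETION  Token;
//   EFI_STATUS     ApStatus;
//
//   Status = InternalSmmStartupThisAp (
//              MyProcedure,      // EFI_AP_PROCEDURE2 to run on the AP
//              ApIndex,          // target AP: must be present, not the BSP
//              MyArg,            // argument handed to MyProcedure
//              &Token,           // non-NULL token => return immediately
//              0,                // no timeout
//              &ApStatus         // EFI_NOT_READY, then MyProcedure's result
//              );
//   ...
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();               // poll until the AP finishes
//   }
//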
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN               Index;
  UINTN               CpuCount;
  PROCEDURE_TOKEN     *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the loop above already checked mSmmMpSyncData->CpuData[***].Busy for
  // each AP, the code here always uses AcquireSpinLock instead of
  // AcquireSpinLockOrFail and is not expected to block.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

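//
// Illustrative sketch (not part of the driver): blocking broadcast with a
// per-CPU status array. MyProcedure, MyArg, and MAX_CPU_COUNT are
// hypothetical names; the array must have at least mMaxNumberOfCpus entries.
// Note that excluded processors (the BSP, and APs that never checked in)
// end up with EFI_NOT_STARTED in their slots, per the PI spec.
//
//   EFI_STATUS  StatusArray[MAX_CPU_COUNT];
//
//   Status = InternalSmmStartupAllAPs (
//              MyProcedure,      // run on every present AP
//              0,                // no timeout
//              MyArg,            // shared argument
//              NULL,             // NULL token => block until all APs finish
//              StatusArray       // per-CPU results
//              );
//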
/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add the wrapper function below to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer              Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

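//
// Illustrative sketch (not part of the driver): resulting layout of the
// semaphore block. Each semaphore or lock gets its own slot of SemaphoreSize
// bytes (as reported by GetSpinLockProperties ()) to keep the hot flags on
// separate cache lines and avoid false sharing between processors:
//
//   Offset 0 * SemaphoreSize:  Counter              (global)
//   Offset 1 * SemaphoreSize:  InsideSmm            (global)
//   Offset 2 * SemaphoreSize:  AllCpusInSync        (global)
//   Offset 3 * SemaphoreSize:  PFLock               (global)
//   Offset 4 * SemaphoreSize:  CodeAccessCheckLock  (global)
//   GlobalSemaphoresSize ..:   Busy[0..N-1], then Run[0..N-1],
//                              then Present[0..N-1] (one slot per CPU)
//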
/**
  Initialize uncacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

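//
// Illustrative sketch (not part of the driver): the single allocation carved
// up by InitializeMpSyncData (), matching mSmmMpSyncDataSize as computed in
// InitializeMpServiceData () below:
//
//   [ SMM_DISPATCHER_MP_SYNC_DATA                       ]
//   [ CpuData:      SMM_CPU_DATA_BLOCK x NumberOfCpus   ]
//   [ CandidateBsp: BOOLEAN            x NumberOfCpus   ]
//
// Note the Busy/Run/Present pointers inside each CpuData entry do not point
// into this buffer; they point at the per-CPU slots of the semaphore block.
//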
/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value of the page tables created for SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize,
  IN UINTN       ShadowStackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       Successfully registered the SMM Foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure to be run at each SMM entry.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                       with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information

  @retval EFI_SUCCESS                  The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER        The Procedure is NULL but ProcedureArguments is not NULL.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE    Procedure,
  IN OUT VOID                *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
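
//
// Illustrative sketch (not part of the driver): registering a startup
// procedure. MyEarlySmiHook and MyMailbox are hypothetical names. The
// procedure runs on every processor at SmiRendezvous () entry, before BSP
// election and before SMI validation, so it must be brief and MP-safe.
//
//   Status = RegisterStartupProcedure (MyEarlySmiHook, &MyMailbox);
//   ...
//   Status = RegisterStartupProcedure (NULL, NULL);   // deregister
//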