/** @file
SMM MP service implementation

Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;

extern UINTN mSmmShadowStackSize;

/**
  Performs an atomic compare exchange operation to acquire the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem   IN:  32-bit unsigned integer
               OUT: original integer - 1
  @return      Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  for (;;) {
    Value = *Sem;
    if (Value != 0 &&
        InterlockedCompareExchange32 (
          (UINT32*)Sem,
          Value,
          Value - 1
          ) == Value) {
      break;
    }
    CpuPause ();
  }
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem   IN:  32-bit unsigned integer
               OUT: original integer + 1
  @return      Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem   IN:  32-bit unsigned integer
               OUT: -1
  @return      Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
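
//
// Usage sketch (illustrative only; this is how the three primitives above
// are used elsewhere in this file, not additional driver logic). The arrival
// counter protocol pairs a ReleaseSemaphore () on each arriving CPU with a
// LockdownSemaphore () on the BSP:
//
//   ReleaseSemaphore (mSmmMpSyncData->Counter);                // CPU checks in: +1
//   ...
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1; // BSP freezes the
//                                                              // counter at (UINT32)-1
//
// Because ReleaseSemaphore () refuses to increment a value of (UINT32)-1
// (Value + 1 == 0) and returns 0 instead, a CPU arriving after lockdown can
// detect that the BSP has already closed the rendezvous and back off.
//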

/**
  Wait for all APs to perform an atomic compare exchange operation to
  release the semaphore.

  @param NumberOfAPs   Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions   CPU Arrival exception flags.

  @retval TRUE    All CPUs, except those covered by the exception flags, have checked in.
  @retval FALSE   At least one normal AP has not checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE    The OS has enabled LMCE.
  @retval FALSE   The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  The LMCE_S bit indicates (when set) that the current machine-check event was
  delivered to only this logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering SMM,
  except for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementors should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by
  //    some AP running normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of
  //    the delayed / blocked state, they enter SMI immediately without executing instructions in normal mode. Note the traditional
  //    flow requires that no APs do normal mode work while SMI handling is on-going.
  // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in the
  //    SMI-disabled state will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing
  //    normal mode work while SMI handling is on-going.
  // d) We don't add code to check the SMI disabling status to skip sending IPIs to SMI-disabled APs, because:
  //    - In the traditional flow, SMI disabling is discouraged.
  //    - In the relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}
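
//
// Condensed timeline of the arrival logic above (illustrative summary only):
//
//   Round 1: spin until the sync timer expires, an LMCE is signaled, or all
//            CPUs (minus blocked / SMI-disabled ones) have checked in.
//   If stragglers remain: SendSmiIpi () to every non-present CPU so they
//            have an SMI pending, then
//   Round 2: spin again with the same exception mask.
//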

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex   Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode   Whether to wait in blocking mode or non-blocking mode.

  @retval TRUE    The task has been finished by all APs.
  @retval FALSE   The task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether the processor is a present AP.

  @param CpuIndex   The index of the CPU that calls this function.

  @retval TRUE    It's a present AP.
  @retval FALSE   This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during executing the procedure.

  @param CpuIndex   The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Reset the token list so that all tokens in the maintained list can be reused.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param CpuIndex   BSP processor Index
  @param SyncMode   SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or we need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backing up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set the MTRRs to avoid a race condition for
      // threads in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to the Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler; the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of the BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
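
//
// A condensed view of the BSP <-> AP handshake implemented by BSPHandler()
// above and APHandler() below (illustrative summary; the code is
// authoritative). Each step pairs a ReleaseAllAPs()/ReleaseSemaphore(Run)
// on one side with a WaitForSemaphore(Run)/WaitForAllAPs() on the other:
//
//   BSP                                        APs
//   SmmWaitForApArrival(), lockdown Counter    check in via Counter
//   WaitForAllAPs()                       <--  signal arrival (BSP Run)
//   [optional MTRR save / program handshakes when
//    SmmCpuFeaturesNeedConfigureMtrrs() is TRUE]
//   run SMI handlers; dispatch work via SmmStartupThisAp()
//   InsideSmm = FALSE; ReleaseAllAPs()    -->  leave dispatch loop
//   WaitForAllAPs()                       <--  signal pending tasks done
//   ReleaseAllAPs()                       -->  clear Present flag
//   WaitForAllAPs()                       <--  signal ready to exit
//   Counter = 0; AllCpusInSync = FALSE    -->  APs' spin-wait ends, exit SMM
//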

/**
  SMI handler for AP.

  @param CpuIndex   AP processor Index.
  @param ValidSmi   Indicates whether the current SMI is a valid SMI.
  @param SyncMode   SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Wait for the BSP's signal, with timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to back up MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create a page table in SMRAM that identity-maps the low 4GB.

  @param[in]  Is32BitPageTable   Whether the page table is 32-bit PAE.
  @return     PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for the known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for the known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      //
      // 4KB-page entries are already mapped. Just hide the first one anyway.
      //
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      //
      // Create 4KB-page entries
      //
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (not present); the rest stay present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UUINT32)(UINTN)PageTable;
}
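
//
// Layout of the table built by Gen4GPageTable() above (illustrative sketch):
//
//   Page 0       PDPT: 4 entries pointing at pages 1..4
//   Pages 1..4   Page directories: 4 x 512 entries x 2MB pages = 4GB,
//                identity-mapped with IA32_PG_PS set
//   Pages 5..N   Only with PcdCpuSmmStackGuard: 4KB page tables that split
//                the 2MB regions covering the SMI stacks, so each stack's
//                guard page can be left non-present and a stack overflow
//                faults instead of silently corrupting memory
//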

/**
  Checks whether the input token is a token currently in use.

  @param[in] Token   This parameter describes the token that was passed into DispatchProcedure or
                     BroadcastProcedure.

  @retval TRUE    The input token is currently in use.
  @retval FALSE   The input token is not currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffers for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Allocate the SPIN_LOCKs separately from the PROCEDURE_TOKENs because of
  // the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate a new token chunk and then return the
  first free one.

  @param RunningApsCount   The count of running APs for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }
  NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
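
//
// Token lifecycle sketch (illustrative; all names are the ones used in this
// file). A token is a SPIN_LOCK that stays acquired while work is pending:
//
//   ProcToken = GetFreeToken (1);            // acquires ProcToken->SpinLock
//   *Token    = (MM_COMPLETION)ProcToken->SpinLock;
//   ...the AP runs the procedure, then ReleaseToken() releases the SpinLock
//   once RunningApCount drops to zero...
//   IsApReady (SpinLock);                    // EFI_SUCCESS once released
//
// ResetTokens() runs at the end of each SMI so FirstFreeToken rewinds to the
// head of the list and the whole chunk is reusable on the next SMI.
//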

/**
  Checks the status of the specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether the timeout has expired.

  @param[in] Token   This parameter describes the token that was passed into DispatchProcedure or
                     BroadcastProcedure.

  @retval EFI_SUCCESS     The specified AP has finished the task assigned by StartupThisAP().
  @retval EFI_NOT_READY   The specified AP has not finished the task and the timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure                The address of the procedure to run
  @param[in]     CpuIndex                 Target CPU Index
  @param[in,out] ProcArguments            The parameter to pass to the procedure
  @param[in]     Token                    This is an optional parameter that allows the caller to execute the
                                          procedure in a blocking or non-blocking fashion. If it is NULL the
                                          call is blocking, and the call will not return until the AP has
                                          completed the procedure. If the token is not NULL, the call will
                                          return immediately. The caller can check whether the procedure has
                                          completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                          execution of Procedure, either for blocking or non-blocking mode.
                                          Zero means infinity. If the timeout expires before all APs return
                                          from Procedure, then Procedure on the failed APs is terminated. If
                                          the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                          If the timeout expires in non-blocking mode, the timeout can be
                                          determined through CheckOnProcedure or WaitForProcedure.
                                          Note that timeout support is optional. Whether an implementation
                                          supports this feature can be determined via the Attributes data
                                          member.
  @param[in,out] CpuStatus                This optional pointer may be used to get the status code returned
                                          by Procedure when it completes execution on the target AP, or with
                                          EFI_TIMEOUT if the Procedure fails to complete within the optional
                                          timeout. The implementation will update this variable with
                                          EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN     EFI_AP_PROCEDURE2  Procedure,
  IN     UINTN              CpuIndex,
  IN OUT VOID               *ProcArguments OPTIONAL,
  IN     MM_COMPLETION      *Token,
  IN     UINTN              TimeoutInMicroseconds,
  IN OUT EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
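
//
// Caller-side sketch (hypothetical handler code, for illustration only;
// MyProcedure and MyArgument are made-up names):
//
//   MM_COMPLETION  Token;
//   EFI_STATUS     ApStatus;
//
//   Status = InternalSmmStartupThisAp (MyProcedure, Index, MyArgument,
//              &Token, 0, &ApStatus);
//   //
//   // Non-blocking: poll until the AP releases the token.
//   //
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();
//   }
//
// Passing Token == NULL instead makes the call block on the target CPU's
// Busy lock until the procedure completes.
//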

/**
  Worker function to execute a caller-provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS   In blocking mode, all APs have finished before
                        the timeout expired.
  @retval EFI_SUCCESS   In non-blocking mode, the function has been dispatched
                        to all enabled APs.
  @retval others        Failed to start up all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN     EFI_AP_PROCEDURE2  Procedure,
  IN     UINTN              TimeoutInMicroseconds,
  IN OUT VOID               *ProcedureArguments OPTIONAL,
  IN OUT MM_COMPLETION      *Token,
  IN OUT EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy
  // for each present AP, AcquireSpinLock is always used here instead of
  // AcquireSpinLockOrFail; the acquisition is not expected to block.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So the wrapper function below is used to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in] Buffer   Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]     Procedure        The address of the procedure to run
  @param[in]     CpuIndex         Target CPU Index
  @param[in,out] ProcArguments    The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN     UINTN             CpuIndex,
  IN OUT VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param Procedure       The address of the procedure to run
  @param CpuIndex        Target CPU Index
  @param ProcArguments   The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN     UINTN             CpuIndex,
  IN OUT VOID              *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  This keeps hardware breakpoints armed inside SMM without having to re-arm them from
  within SMM.

  NOTE: It might not be appropriate at runtime since it might
  conflict with OS debugging facilities. Turn it off in RELEASE builds.

  @param CpuIndex   CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
  conflict with OS debugging facilities. Turn it off in RELEASE builds.

  @param CpuIndex   CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param CpuIndex   CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when using on-demand paging for memory above 4G.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered startup procedure first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, then when we sampled the ValidSmi flag, either SMI status had
    // not been cleared by the BSP in a new SMI run (so we have a truly invalid SMI), or
    // SMI status had been cleared by the BSP and an existing SMI run has almost ended.
    // (Note we sampled the ValidSmi flag BEFORE judging the BSP-in-progress status.)
    // In both cases, there is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for the BSP's signal to finish the SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to the Released state.
      // This needs to be done early enough to be ready for the BSP's SmmStartupThisAp() call.
      // E.g., with the Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after the AP's Present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow the AP path, regardless of the ValidSmi flag,
      // as the BSP may have cleared the SMI status.
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // The platform hook made the determination successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // The platform hook failed to determine; use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear the last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // The BSP Handler is always called with ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
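
//
// Decision tree of the rendezvous above, condensed (illustrative only):
//
//   !BspInProgress && !ValidSmi        -> exit (spurious SMI, or the tail
//                                         of a run that has almost ended)
//   ReleaseSemaphore (Counter) == 0    -> the BSP already locked the counter
//                                         down; spin on AllCpusInSync, exit
//   BspInProgress                      -> APHandler()
//   elected (BspIndex == CpuIndex)     -> BSPHandler()
//   otherwise                          -> APHandler()
//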

/**
  Allocate buffers for the SpinLock and wrapper functions.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
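
//
// Note on the layout above: every semaphore, even a single BOOLEAN flag, is
// given its own SemaphoreSize-spaced slot. GetSpinLockProperties () reports
// the safe placement granularity (typically one cache line), so spacing the
// per-CPU Busy/Run/Present variables this way keeps CPUs that hammer
// adjacent semaphores from false-sharing a cache line.
//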

/**
  Initialize uncacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks            Base address of SMI stack buffer for all processors.
  @param StackSize         Stack size for each processor in SMM.
  @param ShadowStackSize   Shadow Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                  Cr3;
  UINTN                   Index;
  UINT8                   *GdtTssTables;
  UINTN                   GdtTableStepSize;
  CPUID_VERSION_INFO_EDX  RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**
  Register the SMM Foundation entry point.

  @param This            Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint   SMM Foundation EntryPoint

  @retval EFI_SUCCESS    The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**
  Register a startup procedure that is invoked on every SMI entry, before the
  SMM Foundation entry point.

  @param[in]     Procedure            A pointer to the code stream to be run on the designated target AP
                                      of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                      with the related definitions of
                                      EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                      The caller may pass a value of NULL to deregister any existing
                                      startup procedure.
  @param[in,out] ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                      run by the AP. It is an optional common mailbox between APs and
                                      the caller to share information.

  @retval EFI_SUCCESS             The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER   The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY           The MP sync data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}