1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRRs (fixed MTRRs + variable MTRRs + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25 MM_COMPLETION mSmmStartupThisApToken;
26
27 extern UINTN mSmmShadowStackSize;
28
29 /**
30 Performs an atomic compare exchange operation to get the semaphore.
31 The compare exchange operation must be performed using
32 MP safe mechanisms.
33
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
37
38 **/
39 UINT32
40 WaitForSemaphore (
41 IN OUT volatile UINT32 *Sem
42 )
43 {
44 UINT32 Value;
45
46 for ( ; ;) {
47 Value = *Sem;
48 if ((Value != 0) &&
49 (InterlockedCompareExchange32 (
50 (UINT32 *)Sem,
51 Value,
52 Value - 1
53 ) == Value))
54 {
55 break;
56 }
57
58 CpuPause ();
59 }
60
61 return Value - 1;
62 }
63
64 /**
65 Performs an atomic compare exchange operation to release the semaphore.
66 The compare exchange operation must be performed using
67 MP safe mechanisms.
68
69 @param Sem IN: 32-bit unsigned integer
70 OUT: original integer + 1
71 @return Original integer + 1
72
73 **/
74 UINT32
75 ReleaseSemaphore (
76 IN OUT volatile UINT32 *Sem
77 )
78 {
79 UINT32 Value;
80
81 do {
82 Value = *Sem;
83 } while (Value + 1 != 0 &&
84 InterlockedCompareExchange32 (
85 (UINT32 *)Sem,
86 Value,
87 Value + 1
88 ) != Value);
89
90 return Value + 1;
91 }
92
93 /**
94 Performs an atomic compare exchange operation to lock the semaphore.
95 The compare exchange operation must be performed using
96 MP safe mechanisms.
97
98 @param Sem IN: 32-bit unsigned integer
99 OUT: -1
100 @return Original integer
101
102 **/
103 UINT32
104 LockdownSemaphore (
105 IN OUT volatile UINT32 *Sem
106 )
107 {
108 UINT32 Value;
109
110 do {
111 Value = *Sem;
112 } while (InterlockedCompareExchange32 (
113 (UINT32 *)Sem,
114 Value,
115 (UINT32)-1
116 ) != Value);
117
118 return Value;
119 }
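//
// Illustrative sketch (not part of this file): how the three primitives above
// compose. DemoSem is a hypothetical counter.
//
// volatile UINT32 DemoSem = 0;
//
// ReleaseSemaphore (&DemoSem); // atomically increments: 0 -> 1
// WaitForSemaphore (&DemoSem); // spins until non-zero, then decrements: 1 -> 0
// LockdownSemaphore (&DemoSem); // atomically freezes the count at (UINT32)-1
//
// ReleaseSemaphore() refuses to increment past (UINT32)-1, so a locked-down
// semaphore stays locked until it is explicitly re-initialized.
//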
120
121 /**
122 Wait for all APs to perform an atomic compare exchange operation that releases the semaphore.
123
124 @param NumberOfAPs Number of APs to wait for.
125
126 **/
127 VOID
128 WaitForAllAPs (
129 IN UINTN NumberOfAPs
130 )
131 {
132 UINTN BspIndex;
133
134 BspIndex = mSmmMpSyncData->BspIndex;
135 while (NumberOfAPs-- > 0) {
136 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
137 }
138 }
139
140 /**
141 Performs an atomic compare exchange operation to release semaphore
142 for each AP.
143
144 **/
145 VOID
146 ReleaseAllAPs (
147 VOID
148 )
149 {
150 UINTN Index;
151
152 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
153 if (IsPresentAp (Index)) {
154 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
155 }
156 }
157 }
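//
// Illustrative pairing (not part of this file): each post of a Run semaphore
// has one matching wait. In the BSP/AP handlers later in this file, the BSP
// waits on its own Run slot once per AP while each AP posts to it:
//
// BSP: ReleaseAllAPs (); // post each present AP's Run semaphore once
// BSP: WaitForAllAPs (ApCount); // consume ApCount posts on the BSP's slot
// AP: WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
// AP: ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
//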
158
159 /**
160 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
161
162 @param Exceptions CPU Arrival exception flags.
163
164 @retval TRUE if all CPUs have checked in.
165 @retval FALSE if at least one Normal AP hasn't checked in.
166
167 **/
168 BOOLEAN
169 AllCpusInSmmWithExceptions (
170 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
171 )
172 {
173 UINTN Index;
174 SMM_CPU_DATA_BLOCK *CpuData;
175 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
176
177 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
178
179 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
180 return TRUE;
181 }
182
183 CpuData = mSmmMpSyncData->CpuData;
184 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
185 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
186 if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
187 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {
188 continue;
189 }
190
191 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {
192 continue;
193 }
194
195 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {
196 continue;
197 }
198
199 return FALSE;
200 }
201 }
202
203 return TRUE;
204 }
205
206 /**
207 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
208
209 @retval TRUE The OS has enabled LMCE.
210 @retval FALSE The OS has not enabled LMCE.
211
212 **/
213 BOOLEAN
214 IsLmceOsEnabled (
215 VOID
216 )
217 {
218 MSR_IA32_MCG_CAP_REGISTER McgCap;
219 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
220 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
221
222 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
223 if (McgCap.Bits.MCG_LMCE_P == 0) {
224 return FALSE;
225 }
226
227 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
228 if (FeatureCtrl.Bits.LmceOn == 0) {
229 return FALSE;
230 }
231
232 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
233 return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
234 }
235
236 /**
237 Return whether a local machine check exception has been signaled.
238
239 When set, MCG_STATUS.LMCE_S indicates that the current machine-check event was
240 delivered to only this logical processor.
241
242 @retval TRUE LMCE was signaled.
243 @retval FALSE LMCE was not signaled.
244
245 **/
246 BOOLEAN
247 IsLmceSignaled (
248 VOID
249 )
250 {
251 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
252
253 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
254 return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
255 }
256
257 /**
258 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
259 entering SMM, except for SMI-disabled APs.
260
261 **/
262 VOID
263 SmmWaitForApArrival (
264 VOID
265 )
266 {
267 UINT64 Timer;
268 UINTN Index;
269 BOOLEAN LmceEn;
270 BOOLEAN LmceSignal;
271
272 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
273
274 LmceEn = FALSE;
275 LmceSignal = FALSE;
276 if (mMachineCheckSupported) {
277 LmceEn = IsLmceOsEnabled ();
278 LmceSignal = IsLmceSignaled ();
279 }
280
281 //
282 // Platform implementor should choose a timeout value appropriately:
283 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
284 // the SMI Handlers must ALWAYS take into account the cases in which not all APs are available in an SMI run.
285 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
286 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
287 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
288 // SMI IPI), because with a buffered SMI, any CPU will enter SMM immediately after it is brought out of the blocked state.
289 // - The timeout value must be longer than the longest possible IO operation in the system
290 //
291
292 //
293 // Sync with APs 1st timeout
294 //
295 for (Timer = StartSyncTimer ();
296 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
297 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
298 )
299 {
300 CpuPause ();
301 }
302
303 //
304 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
305 // because:
306 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
307 // normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the delayed / blocked state, they
308 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode
309 // work while SMI handling is on-going.
310 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
311 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
312 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
313 // mode work while SMI handling is on-going.
314 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
315 // - In traditional flow, SMI disabling is discouraged.
316 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
317 // In both cases, adding SMI-disabling checking code increases overhead.
318 //
319 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
320 //
321 // Send SMI IPIs to bring outside processors in
322 //
323 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
324 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
325 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
326 }
327 }
328
329 //
330 // Sync with APs 2nd timeout.
331 //
332 for (Timer = StartSyncTimer ();
333 !IsSyncTimerTimeout (Timer) &&
334 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
335 )
336 {
337 CpuPause ();
338 }
339 }
340
341 return;
342 }
343
344 /**
345 Replace OS MTRRs with SMI MTRRs.
346
347 @param CpuIndex Processor Index
348
349 **/
350 VOID
351 ReplaceOSMtrrs (
352 IN UINTN CpuIndex
353 )
354 {
355 SmmCpuFeaturesDisableSmrr ();
356
357 //
358 // Replace all MTRR registers
359 //
360 MtrrSetAllMtrrs (&gSmiMtrrs);
361 }
362
363 /**
364 Check whether the task has been finished by all APs.
365
366 @param BlockMode Whether to wait in blocking mode or non-blocking mode.
367
368 @retval TRUE The task has been finished by all APs.
369 @retval FALSE The task has not been finished by all APs.
370
371 **/
372 BOOLEAN
373 WaitForAllAPsNotBusy (
374 IN BOOLEAN BlockMode
375 )
376 {
377 UINTN Index;
378
379 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
380 //
381 // Ignore the BSP and APs which have not checked in to SMM.
382 //
383 if (!IsPresentAp (Index)) {
384 continue;
385 }
386
387 if (BlockMode) {
388 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
389 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
390 } else {
391 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
392 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
393 } else {
394 return FALSE;
395 }
396 }
397 }
398
399 return TRUE;
400 }
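//
// Illustrative sketch (not part of this file): a non-blocking caller would
// typically poll until every present AP has dropped its Busy lock:
//
// while (!WaitForAllAPsNotBusy (FALSE)) {
// CpuPause ();
// }
//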
401
402 /**
403 Check whether the specified processor is a present AP.
404
405 @param CpuIndex The index of the processor to check.
406
407 @retval TRUE It's a present AP.
408 @retval FALSE This is not an AP or it is not present.
409
410 **/
411 BOOLEAN
412 IsPresentAp (
413 IN UINTN CpuIndex
414 )
415 {
416 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
417 *(mSmmMpSyncData->CpuData[CpuIndex].Present));
418 }
419
420 /**
421 Clean up the status flags used while executing the procedure.
422
423 @param CpuIndex The AP index which calls this function.
424
425 **/
426 VOID
427 ReleaseToken (
428 IN UINTN CpuIndex
429 )
430 {
431 PROCEDURE_TOKEN *Token;
432
433 Token = mSmmMpSyncData->CpuData[CpuIndex].Token;
434
435 if (InterlockedDecrement (&Token->RunningApCount) == 0) {
436 ReleaseSpinLock (Token->SpinLock);
437 }
438
439 mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
440 }
441
442 /**
443 Reset the tokens in the maintained list so that they are all free again.
444
445 **/
446 VOID
447 ResetTokens (
448 VOID
449 )
450 {
451 //
452 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
453 //
454 gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
455 }
456
457 /**
458 SMI handler for BSP.
459
460 @param CpuIndex BSP processor Index
461 @param SyncMode SMM MP sync mode
462
463 **/
464 VOID
465 BSPHandler (
466 IN UINTN CpuIndex,
467 IN SMM_CPU_SYNC_MODE SyncMode
468 )
469 {
470 UINTN Index;
471 MTRR_SETTINGS Mtrrs;
472 UINTN ApCount;
473 BOOLEAN ClearTopLevelSmiResult;
474 UINTN PresentCount;
475
476 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
477 ApCount = 0;
478
479 //
480 // Flag BSP's presence
481 //
482 *mSmmMpSyncData->InsideSmm = TRUE;
483
484 //
485 // Initialize Debug Agent to start source level debug in BSP handler
486 //
487 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
488
489 //
490 // Mark this processor's presence
491 //
492 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
493
494 //
495 // Clear platform top level SMI status bit before calling SMI handlers. If
496 // we cleared it after SMI handlers are run, we would miss the SMI that
497 // occurs after SMI handlers are done and before SMI status bit is cleared.
498 //
499 ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
500 ASSERT (ClearTopLevelSmiResult == TRUE);
501
502 //
503 // Set running processor index
504 //
505 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
506
507 //
508 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
509 //
510 if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
511 //
512 // Wait for APs to arrive
513 //
514 SmmWaitForApArrival ();
515
516 //
517 // Lock the counter down and retrieve the number of APs
518 //
519 *mSmmMpSyncData->AllCpusInSync = TRUE;
520 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
521
522 //
523 // Wait for all APs to get ready for programming MTRRs
524 //
525 WaitForAllAPs (ApCount);
526
527 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
528 //
529 // Signal all APs that it's time to back up MTRRs
530 //
531 ReleaseAllAPs ();
532
533 //
534 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
535 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
536 // to a large enough value to avoid this situation.
537 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
538 // We do the backup first and then set MTRR to avoid race condition for threads
539 // in the same core.
540 //
541 MtrrGetAllMtrrs (&Mtrrs);
542
543 //
544 // Wait for all APs to complete their MTRR saving
545 //
546 WaitForAllAPs (ApCount);
547
548 //
549 // Let all processors program SMM MTRRs together
550 //
551 ReleaseAllAPs ();
552
553 //
554 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
555 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
556 // to a large enough value to avoid this situation.
557 //
558 ReplaceOSMtrrs (CpuIndex);
559
560 //
561 // Wait for all APs to complete their MTRR programming
562 //
563 WaitForAllAPs (ApCount);
564 }
565 }
566
567 //
568 // The BUSY lock is initialized to Acquired state
569 //
570 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
571
572 //
573 // Perform the pre tasks
574 //
575 PerformPreTasks ();
576
577 //
578 // Invoke SMM Foundation EntryPoint with the processor information context.
579 //
580 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
581
582 //
583 // Make sure all APs have completed their pending non-blocking tasks
584 //
585 WaitForAllAPsNotBusy (TRUE);
586
587 //
588 // Perform the remaining tasks
589 //
590 PerformRemainingTasks ();
591
592 //
593 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
594 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
595 // will run through freely.
596 //
597 if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
598 //
599 // Lock the counter down and retrieve the number of APs
600 //
601 *mSmmMpSyncData->AllCpusInSync = TRUE;
602 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
603 //
604 // Make sure all APs have their Present flag set
605 //
606 while (TRUE) {
607 PresentCount = 0;
608 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
609 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
610 PresentCount++;
611 }
612 }
613
614 if (PresentCount > ApCount) {
615 break;
616 }
617 }
618 }
619
620 //
621 // Notify all APs to exit
622 //
623 *mSmmMpSyncData->InsideSmm = FALSE;
624 ReleaseAllAPs ();
625
626 //
627 // Wait for all APs to complete their pending tasks
628 //
629 WaitForAllAPs (ApCount);
630
631 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
632 //
633 // Signal APs to restore MTRRs
634 //
635 ReleaseAllAPs ();
636
637 //
638 // Restore OS MTRRs
639 //
640 SmmCpuFeaturesReenableSmrr ();
641 MtrrSetAllMtrrs (&Mtrrs);
642
643 //
644 // Wait for all APs to complete MTRR programming
645 //
646 WaitForAllAPs (ApCount);
647 }
648
649 //
650 // Stop source level debug in BSP handler, the code below will not be
651 // debugged.
652 //
653 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
654
655 //
656 // Signal APs to Reset states/semaphore for this processor
657 //
658 ReleaseAllAPs ();
659
660 //
661 // Perform pending operations for hot-plug
662 //
663 SmmCpuUpdate ();
664
665 //
666 // Clear the Present flag of BSP
667 //
668 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
669
670 //
671 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
672 // WaitForAllAps does not depend on the Present flag.
673 //
674 WaitForAllAPs (ApCount);
675
676 //
677 // Reset the tokens buffer.
678 //
679 ResetTokens ();
680
681 //
682 // Reset BspIndex to -1, meaning BSP has not been elected.
683 //
684 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
685 mSmmMpSyncData->BspIndex = (UINT32)-1;
686 }
687
688 //
689 // Allow APs to check in from this point on
690 //
691 *mSmmMpSyncData->Counter = 0;
692 *mSmmMpSyncData->AllCpusInSync = FALSE;
693 }
694
695 /**
696 SMI handler for AP.
697
698 @param CpuIndex AP processor Index.
699 @param ValidSmi Indicates whether the current SMI is a valid SMI.
700 @param SyncMode SMM MP sync mode.
701
702 **/
703 VOID
704 APHandler (
705 IN UINTN CpuIndex,
706 IN BOOLEAN ValidSmi,
707 IN SMM_CPU_SYNC_MODE SyncMode
708 )
709 {
710 UINT64 Timer;
711 UINTN BspIndex;
712 MTRR_SETTINGS Mtrrs;
713 EFI_STATUS ProcedureStatus;
714
715 //
716 // Timeout BSP
717 //
718 for (Timer = StartSyncTimer ();
719 !IsSyncTimerTimeout (Timer) &&
720 !(*mSmmMpSyncData->InsideSmm);
721 )
722 {
723 CpuPause ();
724 }
725
726 if (!(*mSmmMpSyncData->InsideSmm)) {
727 //
728 // BSP timeout in the first round
729 //
730 if (mSmmMpSyncData->BspIndex != -1) {
731 //
732 // BSP Index is known
733 //
734 BspIndex = mSmmMpSyncData->BspIndex;
735 ASSERT (CpuIndex != BspIndex);
736
737 //
738 // Send SMI IPI to bring BSP in
739 //
740 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
741
742 //
743 // Now clock BSP for the 2nd time
744 //
745 for (Timer = StartSyncTimer ();
746 !IsSyncTimerTimeout (Timer) &&
747 !(*mSmmMpSyncData->InsideSmm);
748 )
749 {
750 CpuPause ();
751 }
752
753 if (!(*mSmmMpSyncData->InsideSmm)) {
754 //
755 // Give up since BSP is unable to enter SMM
756 // and signal the completion of this AP
757 WaitForSemaphore (mSmmMpSyncData->Counter);
758 return;
759 }
760 } else {
761 //
762 // Don't know BSP index. Give up without sending IPI to BSP.
763 //
764 WaitForSemaphore (mSmmMpSyncData->Counter);
765 return;
766 }
767 }
768
769 //
770 // BSP is available
771 //
772 BspIndex = mSmmMpSyncData->BspIndex;
773 ASSERT (CpuIndex != BspIndex);
774
775 //
776 // Mark this processor's presence
777 //
778 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
779
780 if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
781 //
782 // Notify BSP of arrival at this point
783 //
784 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
785 }
786
787 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
788 //
789 // Wait for the signal from BSP to backup MTRRs
790 //
791 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
792
793 //
794 // Backup OS MTRRs
795 //
796 MtrrGetAllMtrrs (&Mtrrs);
797
798 //
799 // Signal BSP the completion of this AP
800 //
801 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
802
803 //
804 // Wait for BSP's signal to program MTRRs
805 //
806 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
807
808 //
809 // Replace OS MTRRs with SMI MTRRs
810 //
811 ReplaceOSMtrrs (CpuIndex);
812
813 //
814 // Signal BSP the completion of this AP
815 //
816 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
817 }
818
819 while (TRUE) {
820 //
821 // Wait for something to happen
822 //
823 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
824
825 //
826 // Check if BSP wants to exit SMM
827 //
828 if (!(*mSmmMpSyncData->InsideSmm)) {
829 break;
830 }
831
832 //
833 // BUSY should be acquired by SmmStartupThisAp()
834 //
835 ASSERT (
836 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
837 );
838
839 //
840 // Invoke the scheduled procedure
841 //
842 ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
843 (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
844 );
845 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
846 *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
847 }
848
849 if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
850 ReleaseToken (CpuIndex);
851 }
852
853 //
854 // Release BUSY
855 //
856 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
857 }
858
859 if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
860 //
861 // Notify the BSP of this AP's readiness to program MTRRs
862 //
863 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
864
865 //
866 // Wait for the signal from BSP to program MTRRs
867 //
868 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
869
870 //
871 // Restore OS MTRRs
872 //
873 SmmCpuFeaturesReenableSmrr ();
874 MtrrSetAllMtrrs (&Mtrrs);
875 }
876
877 //
878 // Notify the BSP of this AP's readiness to reset states/semaphores for this processor
879 //
880 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
881
882 //
883 // Wait for the signal from BSP to Reset states/semaphore for this processor
884 //
885 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
886
887 //
888 // Reset states/semaphore for this processor
889 //
890 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
891
892 //
893 // Notify the BSP of this AP's readiness to exit SMM
894 //
895 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
896 }
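//
// Illustrative summary (not part of this file) of the BSP/AP MTRR handshake
// above when SmmCpuFeaturesNeedConfigureMtrrs() is TRUE; each arrow is one
// Run semaphore post consumed by the other side:
//
// BSP AP
// WaitForAllAPs (arrival) <-- ReleaseSemaphore (BSP Run)
// ReleaseAllAPs ("back up") --> WaitForSemaphore (own Run)
// MtrrGetAllMtrrs () MtrrGetAllMtrrs ()
// WaitForAllAPs () <-- ReleaseSemaphore (BSP Run)
// ReleaseAllAPs ("program") --> WaitForSemaphore (own Run)
// ReplaceOSMtrrs () ReplaceOSMtrrs ()
// WaitForAllAPs () <-- ReleaseSemaphore (BSP Run)
//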
897
898 /**
899 Create a 4GB page table in SMRAM.
900
901 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
902 @return PageTable Address
903
904 **/
905 UINT32
906 Gen4GPageTable (
907 IN BOOLEAN Is32BitPageTable
908 )
909 {
910 VOID *PageTable;
911 UINTN Index;
912 UINT64 *Pte;
913 UINTN PagesNeeded;
914 UINTN Low2MBoundary;
915 UINTN High2MBoundary;
916 UINTN Pages;
917 UINTN GuardPage;
918 UINT64 *Pdpte;
919 UINTN PageIndex;
920 UINTN PageAddress;
921
922 Low2MBoundary = 0;
923 High2MBoundary = 0;
924 PagesNeeded = 0;
925 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
926 //
927 // Add one more page for known good stack, then find the lower 2MB aligned address.
928 //
929 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
930 //
931 // Add two more pages for known good stack and stack guard page,
932 // then find the lower 2MB aligned address.
933 //
934 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
935 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
936 }
937
938 //
939 // Allocate the page table
940 //
941 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
942 ASSERT (PageTable != NULL);
943
944 PageTable = (VOID *)((UINTN)PageTable);
945 Pte = (UINT64 *)PageTable;
946
947 //
948 // Zero out all page table entries first
949 //
950 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
951
952 //
953 // Set Page Directory Pointers
954 //
955 for (Index = 0; Index < 4; Index++) {
956 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
957 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
958 }
959
960 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
961
962 //
963 // Fill in Page Directory Entries
964 //
965 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
966 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
967 }
968
969 Pdpte = (UINT64 *)PageTable;
970 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
971 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
972 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
973 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
974 Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
975 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
976 //
977 // Fill in Page Table Entries
978 //
979 Pte = (UINT64 *)Pages;
980 PageAddress = PageIndex;
981 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
982 if (PageAddress == GuardPage) {
983 //
984 // Mark the guard page as non-present
985 //
986 Pte[Index] = PageAddress | mAddressEncMask;
987 GuardPage += (mSmmStackSize + mSmmShadowStackSize);
988 if (GuardPage > mSmmStackArrayEnd) {
989 GuardPage = 0;
990 }
991 } else {
992 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
993 }
994
995 PageAddress += EFI_PAGE_SIZE;
996 }
997
998 Pages += EFI_PAGE_SIZE;
999 }
1000 }
1001
1002 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
1003 Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1004 if ((Pte[0] & IA32_PG_PS) == 0) {
1005 // 4K-page entries are already mapped. Just hide the first one anyway.
1006 Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
1007 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
1008 } else {
1009 // Create 4K-page entries
1010 Pages = (UINTN)AllocatePageTableMemory (1);
1011 ASSERT (Pages != 0);
1012
1013 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
1014
1015 Pte = (UINT64 *)Pages;
1016 PageAddress = 0;
1017 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 by leaving its Present bit clear; the remaining pages are mapped as present below
1018 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
1019 PageAddress += EFI_PAGE_SIZE;
1020 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
1021 }
1022 }
1023 }
1024
1025 return (UINT32)(UINTN)PageTable;
1026 }
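//
// Illustrative decomposition (not part of this file) of how Gen4GPageTable()
// splits a 32-bit address, matching the BitFieldRead32() calls above:
//
// bits 31:30 -> one of 4 PDPTEs (each covers 1GB)
// bits 29:21 -> one of 512 PDEs (each covers 2MB, IA32_PG_PS set)
// bits 20:12 -> one of 512 PTEs (4KB pages, stack-guard ranges only)
//
// For example, address 0x40200000 has PDPTE index 1 and PDE index 1, so it is
// mapped by the second 2MB entry of the second page directory.
//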
1027
1028 /**
1029 Checks whether the input token is a token currently in use.
1030
1031 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1032 BroadcastProcedure.
1033
1034 @retval TRUE The input token is currently in use.
1035 @retval FALSE The input token is not currently in use.
1036 **/
1037 BOOLEAN
1038 IsTokenInUse (
1039 IN SPIN_LOCK *Token
1040 )
1041 {
1042 LIST_ENTRY *Link;
1043 PROCEDURE_TOKEN *ProcToken;
1044
1045 if (Token == NULL) {
1046 return FALSE;
1047 }
1048
1049 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1050 //
1051 // Only search used tokens.
1052 //
1053 while (Link != gSmmCpuPrivate->FirstFreeToken) {
1054 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1055
1056 if (ProcToken->SpinLock == Token) {
1057 return TRUE;
1058 }
1059
1060 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1061 }
1062
1063 return FALSE;
1064 }
1065
1066 /**
1067 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1068
1069 @return First token of the token buffer.
1070 **/
1071 LIST_ENTRY *
1072 AllocateTokenBuffer (
1073 VOID
1074 )
1075 {
1076 UINTN SpinLockSize;
1077 UINT32 TokenCountPerChunk;
1078 UINTN Index;
1079 SPIN_LOCK *SpinLock;
1080 UINT8 *SpinLockBuffer;
1081 PROCEDURE_TOKEN *ProcTokens;
1082
1083 SpinLockSize = GetSpinLockProperties ();
1084
1085 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1086 ASSERT (TokenCountPerChunk != 0);
1087 if (TokenCountPerChunk == 0) {
1088 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1089 CpuDeadLoop ();
1090 }
1091
1092 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
1093
1094 //
1095 // Allocate the SPIN_LOCK and PROCEDURE_TOKEN buffers separately because of the alignment required by SPIN_LOCK.
1096 //
1097 SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
1098 ASSERT (SpinLockBuffer != NULL);
1099
1100 ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
1101 ASSERT (ProcTokens != NULL);
1102
1103 for (Index = 0; Index < TokenCountPerChunk; Index++) {
1104 SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
1105 InitializeSpinLock (SpinLock);
1106
1107 ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
1108 ProcTokens[Index].SpinLock = SpinLock;
1109 ProcTokens[Index].RunningApCount = 0;
1110
1111 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
1112 }
1113
1114 return &ProcTokens[0].Link;
1115 }
1116
1117 /**
1118 Get a free token.
1119
1120 If there is no free token, allocate new tokens and then return the first free one.
1121
1122 @param RunningApsCount The count of running APs for this token.
1123
1124 @return The first free PROCEDURE_TOKEN.
1125
1126 **/
1127 PROCEDURE_TOKEN *
1128 GetFreeToken (
1129 IN UINT32 RunningApsCount
1130 )
1131 {
1132 PROCEDURE_TOKEN *NewToken;
1133
1134 //
1135 // If FirstFreeToken has reached the end of the token list, enlarge the token list.
1136 // Set FirstFreeToken to the first free token.
1137 //
1138 if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
1139 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1140 }
1141
1142 NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
1143 gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);
1144
1145 NewToken->RunningApCount = RunningApsCount;
1146 AcquireSpinLock (NewToken->SpinLock);
1147
1148 return NewToken;
1149 }
1150
1151 /**
1152 Checks status of specified AP.
1153
1154 This function checks whether the specified AP has finished the task assigned
1155 by StartupThisAP(), and whether the timeout has expired.
1156
1157 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1158 BroadcastProcedure.
1159
1160 @retval EFI_SUCCESS The specified AP has finished the task assigned by StartupThisAP().
1161 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1162 **/
1163 EFI_STATUS
1164 IsApReady (
1165 IN SPIN_LOCK *Token
1166 )
1167 {
1168 if (AcquireSpinLockOrFail (Token)) {
1169 ReleaseSpinLock (Token);
1170 return EFI_SUCCESS;
1171 }
1172
1173 return EFI_NOT_READY;
1174 }
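//
// Illustrative token lifecycle (not part of this file), tying together
// GetFreeToken(), ReleaseToken() and IsApReady() above:
//
// Token = GetFreeToken (1); // SpinLock acquired, RunningApCount = 1
// ... the scheduled procedure runs on the target AP ...
// ReleaseToken (CpuIndex); // last running AP releases Token->SpinLock
// Status = IsApReady (Token->SpinLock); // EFI_SUCCESS once the lock is free
//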
1175
1176 /**
1177 Schedule a procedure to run on the specified CPU.
1178
1179 @param[in] Procedure The address of the procedure to run
1180 @param[in] CpuIndex Target CPU Index
1181 @param[in,out] ProcArguments The parameter to pass to the procedure
1182 @param[in] Token This is an optional parameter that allows the caller to execute the
1183 procedure in a blocking or non-blocking fashion. If it is NULL the
1184 call is blocking, and the call will not return until the AP has
1185 completed the procedure. If the token is not NULL, the call will
1186 return immediately. The caller can check whether the procedure has
1187 completed with CheckOnProcedure or WaitForProcedure.
1188 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1189 execution of Procedure, either for blocking or non-blocking mode.
1190 Zero means infinity. If the timeout expires before all APs return
1191 from Procedure, then Procedure on the failed APs is terminated. If
1192 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1193 If the timeout expires in non-blocking mode, the timeout can be
1194 determined through CheckOnProcedure or WaitForProcedure.
1195 Note that timeout support is optional. Whether an implementation
1196 supports this feature can be determined via the Attributes data
1197 member.
1198 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1199 by Procedure when it completes execution on the target AP, or with
1200 EFI_TIMEOUT if the Procedure fails to complete within the optional
1201 timeout. The implementation will update this variable with
1202 EFI_NOT_READY prior to starting Procedure on the target AP.
1203
1204 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1205 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1206 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1207 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1208 @retval EFI_SUCCESS The procedure has been successfully scheduled
1209
1210 **/
1211 EFI_STATUS
1212 InternalSmmStartupThisAp (
1213 IN EFI_AP_PROCEDURE2 Procedure,
1214 IN UINTN CpuIndex,
1215 IN OUT VOID *ProcArguments OPTIONAL,
1216 IN MM_COMPLETION *Token,
1217 IN UINTN TimeoutInMicroseconds,
1218 IN OUT EFI_STATUS *CpuStatus
1219 )
1220 {
1221 PROCEDURE_TOKEN *ProcToken;
1222
1223 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
1224 DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
1225 return EFI_INVALID_PARAMETER;
1226 }
1227
1228 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1229 DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
1230 return EFI_INVALID_PARAMETER;
1231 }
1232
1233 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
1234 return EFI_INVALID_PARAMETER;
1235 }
1236
1237 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
1238 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
1239 DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
1240 }
1241
1242 return EFI_INVALID_PARAMETER;
1243 }
1244
1245 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
1246 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
1247 DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
1248 }
1249
1250 return EFI_INVALID_PARAMETER;
1251 }
1252
1253 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1254 return EFI_INVALID_PARAMETER;
1255 }
1256
1257 if (Procedure == NULL) {
1258 return EFI_INVALID_PARAMETER;
1259 }
1260
1261 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1262
1263 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
1264 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
1265 if (Token != NULL) {
1266 if (Token != &mSmmStartupThisApToken) {
1267 //
1268 // When Token points to mSmmStartupThisApToken, this routine is called
1269 // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
1270 //
1271 // In this case, caller wants to startup AP procedure in non-blocking
1272 // mode and cannot get the completion status from the Token because there
1273 // is no way to return the Token to caller from SmmStartupThisAp().
1274 // Caller needs to use its implementation specific way to query the completion status.
1275 //
1276 // There is no need to allocate a token in such a case, so the following 3 overheads
1277 // can be avoided:
1278 // 1. Call AllocateTokenBuffer() when there is no free token.
1279 // 2. Get a free token from the token buffer.
1280 // 3. Call ReleaseToken() in APHandler().
1281 //
1282 ProcToken = GetFreeToken (1);
1283 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
1284 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1285 }
1286 }
1287
1288 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
1289 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
1290 *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
1291 }
1292
1293 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
1294
1295 if (Token == NULL) {
1296 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1297 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1298 }
1299
1300 return EFI_SUCCESS;
1301 }
1302
1303 /**
1304 Worker function to execute a caller provided function on all enabled APs.
1305
1306 @param[in] Procedure A pointer to the function to be run on
1307 enabled APs of the system.
1308 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1309 APs to return from Procedure, either for
1310 blocking or non-blocking mode.
1311 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1312 all APs.
1313 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1314 procedure in a blocking or non-blocking fashion. If it is NULL the
1315 call is blocking, and the call will not return until the AP has
1316 completed the procedure. If the token is not NULL, the call will
1317 return immediately. The caller can check whether the procedure has
1318 completed with CheckOnProcedure or WaitForProcedure.
1319 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1320 by Procedure when it completes execution on the target AP, or with
1321 EFI_TIMEOUT if the Procedure fails to complete within the optional
1322 timeout. The implementation will update this variable with
1323 EFI_NOT_READY prior to starting Procedure on the target AP.
1324
1325
1326 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1327 the timeout expired.
1328 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1329 to all enabled APs.
1330 @retval others Failed to start up all APs.
1331
1332 **/
1333 EFI_STATUS
1334 InternalSmmStartupAllAPs (
1335 IN EFI_AP_PROCEDURE2 Procedure,
1336 IN UINTN TimeoutInMicroseconds,
1337 IN OUT VOID *ProcedureArguments OPTIONAL,
1338 IN OUT MM_COMPLETION *Token,
1339 IN OUT EFI_STATUS *CPUStatus
1340 )
1341 {
1342 UINTN Index;
1343 UINTN CpuCount;
1344 PROCEDURE_TOKEN *ProcToken;
1345
1346 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1347 return EFI_INVALID_PARAMETER;
1348 }
1349
1350 if (Procedure == NULL) {
1351 return EFI_INVALID_PARAMETER;
1352 }
1353
1354 CpuCount = 0;
1355 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1356 if (IsPresentAp (Index)) {
1357 CpuCount++;
1358
1359 if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
1360 return EFI_INVALID_PARAMETER;
1361 }
1362
1363 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
1364 return EFI_NOT_READY;
1365 }
1366
1367 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1368 }
1369 }
1370
1371 if (CpuCount == 0) {
1372 return EFI_NOT_STARTED;
1373 }
1374
1375 if (Token != NULL) {
1376 ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
1377 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1378 } else {
1379 ProcToken = NULL;
1380 }
1381
1382 //
1383 // Make sure all the BUSY locks are acquired.
1384 //
1385 // Because the code above has already checked mSmmMpSyncData->CpuData[***].Busy for each AP,
1386 // AcquireSpinLock is always used here instead of AcquireSpinLockOrFail, even for
1387 // the non-blocking case.
1388 //
1389 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1390 if (IsPresentAp (Index)) {
1391 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1392 }
1393 }
1394
1395 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1396 if (IsPresentAp (Index)) {
1397 mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
1398 mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
1399 if (ProcToken != NULL) {
1400 mSmmMpSyncData->CpuData[Index].Token = ProcToken;
1401 }
1402
1403 if (CPUStatus != NULL) {
1404 mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
1405 if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
1406 *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
1407 }
1408 }
1409 } else {
1410 //
1411 // PI spec requirement:
1412 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
1413 //
1414 if (CPUStatus != NULL) {
1415 CPUStatus[Index] = EFI_NOT_STARTED;
1416 }
1417
1418 //
1419 // Decrease the count to mark this processor(AP or BSP) as finished.
1420 //
1421 if (ProcToken != NULL) {
1422 WaitForSemaphore (&ProcToken->RunningApCount);
1423 }
1424 }
1425 }
1426
1427 ReleaseAllAPs ();
1428
1429 if (Token == NULL) {
1430 //
1431 // Make sure all APs have completed their tasks.
1432 //
1433 WaitForAllAPsNotBusy (TRUE);
1434 }
1435
1436 return EFI_SUCCESS;
1437 }
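//
// Illustrative caller sketch (not part of this file); DemoProc, DemoStatus
// and DEMO_MAX_CPUS are hypothetical. A blocking broadcast that collects
// per-CPU status:
//
// EFI_STATUS DemoStatus[DEMO_MAX_CPUS]; // one slot per CPU
//
// Status = InternalSmmStartupAllAPs (
// DemoProc, // EFI_AP_PROCEDURE2 run on every present AP
// 0, // no timeout
// NULL, // no procedure argument
// NULL, // NULL token => blocking call
// DemoStatus // excluded CPUs get EFI_NOT_STARTED
// );
//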
1438
1439 /**
1440 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1441 If the function is defined with a type that is not compatible with
1442 the type (of the expression) pointed to by the expression that
1443 denotes the called function, the behavior is undefined.
1444
1445 So add the wrapper function below to convert between EFI_AP_PROCEDURE
1446 and EFI_AP_PROCEDURE2.
1447
1448 Wrapper for Procedures.
1449
1450 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1451
1452 **/
1453 EFI_STATUS
1454 EFIAPI
1455 ProcedureWrapper (
1456 IN VOID *Buffer
1457 )
1458 {
1459 PROCEDURE_WRAPPER *Wrapper;
1460
1461 Wrapper = Buffer;
1462 Wrapper->Procedure (Wrapper->ProcedureArgument);
1463
1464 return EFI_SUCCESS;
1465 }
1466
1467 /**
1468 Schedule a procedure to run on the specified CPU in blocking mode.
1469
1470 @param[in] Procedure The address of the procedure to run
1471 @param[in] CpuIndex Target CPU Index
1472 @param[in, out] ProcArguments The parameter to pass to the procedure
1473
1474 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1475 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1476 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1477 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1478 @retval EFI_SUCCESS The procedure has been successfully scheduled
1479
1480 **/
1481 EFI_STATUS
1482 EFIAPI
1483 SmmBlockingStartupThisAp (
1484 IN EFI_AP_PROCEDURE Procedure,
1485 IN UINTN CpuIndex,
1486 IN OUT VOID *ProcArguments OPTIONAL
1487 )
1488 {
1489 PROCEDURE_WRAPPER Wrapper;
1490
1491 Wrapper.Procedure = Procedure;
1492 Wrapper.ProcedureArgument = ProcArguments;
1493
1494 //
1495 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1496 //
1497 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1498 }
1499
1500 /**
1501 Schedule a procedure to run on the specified CPU.
1502
1503 @param Procedure The address of the procedure to run
1504 @param CpuIndex Target CPU Index
1505 @param ProcArguments The parameter to pass to the procedure
1506
1507 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1508 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1509 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1510 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1511 @retval EFI_SUCCESS The procedure has been successfully scheduled
1512
1513 **/
1514 EFI_STATUS
1515 EFIAPI
1516 SmmStartupThisAp (
1517 IN EFI_AP_PROCEDURE Procedure,
1518 IN UINTN CpuIndex,
1519 IN OUT VOID *ProcArguments OPTIONAL
1520 )
1521 {
1522 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1523 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1524
1525 //
1526 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1527 //
1528 return InternalSmmStartupThisAp (
1529 ProcedureWrapper,
1530 CpuIndex,
1531 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1532 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
1533 0,
1534 NULL
1535 );
1536 }
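//
// Illustrative caller sketch (not part of this file); DemoProcedure is a
// hypothetical EFI_AP_PROCEDURE:
//
// VOID EFIAPI DemoProcedure (IN OUT VOID *Buffer);
//
// // Always blocks until the AP completes:
// Status = SmmBlockingStartupThisAp (DemoProcedure, CpuIndex, NULL);
// // Blocks only when PcdCpuSmmBlockStartupThisAp is TRUE:
// Status = SmmStartupThisAp (DemoProcedure, CpuIndex, NULL);
//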
1537
1538 /**
1539 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1540 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.
1541
1542 NOTE: It might not be appropriate at runtime since it might
1543 conflict with OS debugging facilities. Turn them off in RELEASE.
1544
1545 @param CpuIndex CPU Index
1546
1547 **/
1548 VOID
1549 EFIAPI
1550 CpuSmmDebugEntry (
1551 IN UINTN CpuIndex
1552 )
1553 {
1554 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1555
1556 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1557 ASSERT (CpuIndex < mMaxNumberOfCpus);
1558 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1559 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1560 AsmWriteDr6 (CpuSaveState->x86._DR6);
1561 AsmWriteDr7 (CpuSaveState->x86._DR7);
1562 } else {
1563 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1564 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1565 }
1566 }
1567 }
1568
1569 /**
1570 This function restores DR6 & DR7 to SMM save state.
1571
1572 NOTE: It might not be appropriate at runtime since it might
1573 conflict with OS debugging facilities. Turn them off in RELEASE.
1574
1575 @param CpuIndex CPU Index
1576
1577 **/
1578 VOID
1579 EFIAPI
1580 CpuSmmDebugExit (
1581 IN UINTN CpuIndex
1582 )
1583 {
1584 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1585
1586 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1587 ASSERT (CpuIndex < mMaxNumberOfCpus);
1588 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1589 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1590 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1591 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1592 } else {
1593 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1594 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1595 }
1596 }
1597 }
1598
1599 /**
1600 C function for SMI entry, each processor comes here upon SMI trigger.
1601
1602 @param CpuIndex CPU Index
1603
1604 **/
1605 VOID
1606 EFIAPI
1607 SmiRendezvous (
1608 IN UINTN CpuIndex
1609 )
1610 {
1611 EFI_STATUS Status;
1612 BOOLEAN ValidSmi;
1613 BOOLEAN IsBsp;
1614 BOOLEAN BspInProgress;
1615 UINTN Index;
1616 UINTN Cr2;
1617
1618 ASSERT (CpuIndex < mMaxNumberOfCpus);
1619
1620 //
1621 // Save Cr2 because a Page Fault exception in SMM may override its value
1622 // when on-demand paging is used for memory above 4GB.
1623 //
1624 Cr2 = 0;
1625 SaveCr2 (&Cr2);
1626
1627 //
1628 // Call the user-registered startup function first.
1629 //
1630 if (mSmmMpSyncData->StartupProcedure != NULL) {
1631 mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
1632 }
1633
1634 //
1635 // Perform CPU specific entry hooks
1636 //
1637 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1638
1639 //
1640 // Determine if this is a valid SMI
1641 //
1642 ValidSmi = PlatformValidSmi ();
1643
1644 //
1645 // Determine if the BSP is already in progress. Note this must be checked after
1646 // ValidSmi because BSP may clear a valid SMI source after checking in.
1647 //
1648 BspInProgress = *mSmmMpSyncData->InsideSmm;
1649
1650 if (!BspInProgress && !ValidSmi) {
1651 //
1652 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1653 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1654 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1655 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1656 // is nothing we need to do.
1657 //
1658 goto Exit;
1659 } else {
1660 //
1661 // Signal presence of this processor
1662 //
1663 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1664 //
1665 // BSP has already ended the synchronization, so QUIT!!!
1666 //
1667
1668 //
1669 // Wait for BSP's signal to finish SMI
1670 //
1671 while (*mSmmMpSyncData->AllCpusInSync) {
1672 CpuPause ();
1673 }
1674
1675 goto Exit;
1676 } else {
1677 //
1678 // The BUSY lock is initialized to Released state.
1679 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1680 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1681 // after AP's present flag is detected.
1682 //
1683 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1684 }
1685
1686 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1687 ActivateSmmProfile (CpuIndex);
1688 }
1689
1690 if (BspInProgress) {
1691 //
1692 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1693 // as BSP may have cleared the SMI status
1694 //
1695 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1696 } else {
1697 //
1698 // We have a valid SMI
1699 //
1700
1701 //
1702 // Elect BSP
1703 //
1704 IsBsp = FALSE;
1705 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1706 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1707 //
1708 // Call platform hook to do BSP election
1709 //
1710 Status = PlatformSmmBspElection (&IsBsp);
1711 if (EFI_SUCCESS == Status) {
1712 //
1713 // Platform hook determined the BSP successfully
1714 //
1715 if (IsBsp) {
1716 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1717 }
1718 } else {
1719 //
1720 // Platform hook failed to determine the BSP; use the default BSP election method
1721 //
1722 InterlockedCompareExchange32 (
1723 (UINT32 *)&mSmmMpSyncData->BspIndex,
1724 (UINT32)-1,
1725 (UINT32)CpuIndex
1726 );
1727 }
1728 }
1729 }
1730
1731 //
1732 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1733 //
1734 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1735 //
1736 // Clear last request for SwitchBsp.
1737 //
1738 if (mSmmMpSyncData->SwitchBsp) {
1739 mSmmMpSyncData->SwitchBsp = FALSE;
1740 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1741 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1742 }
1743 }
1744
1745 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1746 SmmProfileRecordSmiNum ();
1747 }
1748
1749 //
1750 // BSP Handler is always called with a ValidSmi == TRUE
1751 //
1752 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1753 } else {
1754 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1755 }
1756 }
1757
1758 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1759
1760 //
1761 // Wait for BSP's signal to exit SMI
1762 //
1763 while (*mSmmMpSyncData->AllCpusInSync) {
1764 CpuPause ();
1765 }
1766 }
1767
1768 Exit:
1769 SmmCpuFeaturesRendezvousExit (CpuIndex);
1770
1771 //
1772 // Restore Cr2
1773 //
1774 RestoreCr2 (Cr2);
1775 }
1776
1777 /**
1778 Allocate the SpinLock buffer and the wrapper function buffer.
1779
1780 **/
1781 VOID
1782 InitializeDataForMmMp (
1783 VOID
1784 )
1785 {
1786 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1787 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1788
1789 InitializeListHead (&gSmmCpuPrivate->TokenList);
1790
1791 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1792 }
1793
1794 /**
1795 Allocate buffer for all semaphores and spin locks.
1796
1797 **/
1798 VOID
1799 InitializeSmmCpuSemaphores (
1800 VOID
1801 )
1802 {
1803 UINTN ProcessorCount;
1804 UINTN TotalSize;
1805 UINTN GlobalSemaphoresSize;
1806 UINTN CpuSemaphoresSize;
1807 UINTN SemaphoreSize;
1808 UINTN Pages;
1809 UINTN *SemaphoreBlock;
1810 UINTN SemaphoreAddr;
1811
1812 SemaphoreSize = GetSpinLockProperties ();
1813 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1814 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1815 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1816 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1817 DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1818 DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1819 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1820 SemaphoreBlock = AllocatePages (Pages);
1821 ASSERT (SemaphoreBlock != NULL);
1822 ZeroMem (SemaphoreBlock, TotalSize);
1823
1824 SemaphoreAddr = (UINTN)SemaphoreBlock;
1825 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1826 SemaphoreAddr += SemaphoreSize;
1827 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1828 SemaphoreAddr += SemaphoreSize;
1829 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1830 SemaphoreAddr += SemaphoreSize;
1831 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1832 SemaphoreAddr += SemaphoreSize;
1833 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1834 = (SPIN_LOCK *)SemaphoreAddr;
1835 SemaphoreAddr += SemaphoreSize;
1836
1837 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1838 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1839 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1840 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1841 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1842 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1843
1844 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1845 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1846
1847 mSemaphoreSize = SemaphoreSize;
1848 }
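//
// Illustrative layout (not part of this file) of the block carved out above,
// with S = GetSpinLockProperties() and N = ProcessorCount:
//
// [Counter][InsideSmm][AllCpusInSync][PFLock][CodeAccessCheckLock] 5 x S bytes
// [Busy x N][Run x N][Present x N] 3 x N x S bytes
//
// Each semaphore sits alone in its S-byte slot, which keeps frequently
// written semaphores from sharing a cache line (assuming S is at least the
// cache-line size).
//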
1849
1850 /**
1851 Initialize uncacheable data.
1852
1853 **/
1854 VOID
1855 EFIAPI
1856 InitializeMpSyncData (
1857 VOID
1858 )
1859 {
1860 UINTN CpuIndex;
1861
1862 if (mSmmMpSyncData != NULL) {
1863 //
1864 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1865 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1866 //
1867 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1868 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1869 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1870 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1871 //
1872 // Enable BSP election by setting BspIndex to -1
1873 //
1874 mSmmMpSyncData->BspIndex = (UINT32)-1;
1875 }
1876
1877 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1878
1879 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1880 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1881 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1882 ASSERT (
1883 mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1884 mSmmMpSyncData->AllCpusInSync != NULL
1885 );
1886 *mSmmMpSyncData->Counter = 0;
1887 *mSmmMpSyncData->InsideSmm = FALSE;
1888 *mSmmMpSyncData->AllCpusInSync = FALSE;
1889
1890 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
1891 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1892 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1893 mSmmMpSyncData->CpuData[CpuIndex].Run =
1894 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1895 mSmmMpSyncData->CpuData[CpuIndex].Present =
1896 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1897 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1898 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1899 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1900 }
1901 }
1902 }
1903
1904 /**
1905 Initialize global data for MP synchronization.
1906
1907 @param Stacks Base address of SMI stack buffer for all processors.
1908 @param StackSize Stack size for each processor in SMM.
1909 @param ShadowStackSize Shadow Stack size for each processor in SMM.
1910
1911 **/
1912 UINT32
1913 InitializeMpServiceData (
1914 IN VOID *Stacks,
1915 IN UINTN StackSize,
1916 IN UINTN ShadowStackSize
1917 )
1918 {
1919 UINT32 Cr3;
1920 UINTN Index;
1921 UINT8 *GdtTssTables;
1922 UINTN GdtTableStepSize;
1923 CPUID_VERSION_INFO_EDX RegEdx;
1924 UINT32 MaxExtendedFunction;
1925 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
1926
1927 //
1928 // Determine if this CPU supports machine check
1929 //
1930 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1931 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1932
1933 //
1934 // Allocate memory for all locks and semaphores
1935 //
1936 InitializeSmmCpuSemaphores ();
1937
1938 //
1939 // Initialize mSmmMpSyncData
1940 //
1941 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1942 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1943 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1944 ASSERT (mSmmMpSyncData != NULL);
1945 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1946 InitializeMpSyncData ();
1947
1948 //
1949 // Initialize physical address mask
1950 // NOTE: Physical memory above virtual address limit is not supported !!!
1951 //
1952 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
1953 if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
1954 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
1955 } else {
1956 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
1957 }
1958
1959 gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
1960 //
1961 // Clear the low 12 bits
1962 //
1963 gPhyMask &= 0xfffffffffffff000ULL;
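//
// Example (illustrative): with the 36-bit default, LShiftU64 (1, 36) - 1
// yields 0xFFFFFFFFF, and clearing the low 12 bits leaves
// gPhyMask = 0xFFFFFF000.
//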
1964
1965 //
1966 // Create page tables
1967 //
1968 Cr3 = SmmInitPageTable ();
1969
1970 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1971
1972 //
1973 // Install SMI handler for each CPU
1974 //
1975 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1976 InstallSmiHandler (
1977 Index,
1978 (UINT32)mCpuHotPlugData.SmBase[Index],
1979 (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
1980 StackSize,
1981 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1982 gcSmiGdtr.Limit + 1,
1983 gcSmiIdtr.Base,
1984 gcSmiIdtr.Limit + 1,
1985 Cr3
1986 );
1987 }
1988
1989 //
1990 // Record current MTRR settings
1991 //
1992 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1993 MtrrGetAllMtrrs (&gSmiMtrrs);
1994
1995 return Cr3;
1996 }
1997
1998 /**
1999
2000 Register the SMM Foundation entry point.
2001
2002 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2003 @param SmmEntryPoint SMM Foundation EntryPoint
2004
2005 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully.
2006
2007 **/
2008 EFI_STATUS
2009 EFIAPI
2010 RegisterSmmEntry (
2011 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
2012 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2013 )
2014 {
2015 //
2016 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2017 //
2018 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
2019 return EFI_SUCCESS;
2020 }
2021
2022 /**
2023
2024 Register a startup procedure to be invoked at each SMI entry.
2025
2026 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2027 of the system. Type EFI_AP_PROCEDURE is defined in PI Specification Volume 2
2028 with the related definitions of
2029 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2030 The caller may pass a value of NULL to deregister any existing
2031 startup procedure.
2032 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2033 run by the AP. It is an optional common mailbox between APs and
2034 the caller to share information
2035
2036 @retval EFI_SUCCESS The Procedure has been set successfully.
2037 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2038
2039 **/
2040 EFI_STATUS
2041 RegisterStartupProcedure (
2042 IN EFI_AP_PROCEDURE Procedure,
2043 IN OUT VOID *ProcedureArguments OPTIONAL
2044 )
2045 {
2046 if ((Procedure == NULL) && (ProcedureArguments != NULL)) {
2047 return EFI_INVALID_PARAMETER;
2048 }
2049
2050 if (mSmmMpSyncData == NULL) {
2051 return EFI_NOT_READY;
2052 }
2053
2054 mSmmMpSyncData->StartupProcedure = Procedure;
2055 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2056
2057 return EFI_SUCCESS;
2058 }