/** @file
SMM MP service implementation

Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;
MM_COMPLETION                mSmmStartupThisApToken;

extern UINTN  mSmmShadowStackSize;

/**
  Performs an atomic compare exchange operation to get the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  for ( ; ; ) {
    Value = *Sem;
    if (Value != 0 &&
        InterlockedCompareExchange32 (
          (UINT32 *)Sem,
          Value,
          Value - 1
          ) == Value) {
      break;
    }
    CpuPause ();
  }
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);
  return Value;
}
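
//
// Note (illustrative, added for clarity): the three primitives above
// cooperate on the arrival counter during an SMI. An AP checks in with
// ReleaseSemaphore (mSmmMpSyncData->Counter) and backs out with
// WaitForSemaphore (mSmmMpSyncData->Counter). Once the BSP calls
// LockdownSemaphore(), the counter is pinned at (UINT32)-1, so a late
// ReleaseSemaphore() fails its overflow check (Value + 1 == 0) and returns 0
// without incrementing -- which is how a late AP learns that check-in is
// closed (see SmiRendezvous() below).
//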

/**
  Wait for all APs to perform an atomic compare exchange operation to release the semaphore.

  @param NumberOfAPs Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions CPU Arrival exception flags.

  @retval TRUE  All CPUs have checked in.
  @retval FALSE At least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Checks whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE  The OS has enabled LMCE.
  @retval FALSE The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception has been signaled.

  LMCE_S indicates (when set) that a local machine check exception was
  generated and delivered to only this logical processor.

  @retval TRUE  LMCE was signaled.
  @retval FALSE LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
}
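
//
// Note (added for clarity): SmmWaitForApArrival() below uses the two LMCE
// checks above to cut the first synchronization timeout short. If the OS has
// enabled LMCE and the machine-check event was delivered to only this logical
// processor, the other CPUs are not expected to arrive in SMM for this event,
// so there is no point waiting out the full first timeout for them.
//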

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this
  function returns, no AP will execute normal mode code before entering SMM,
  except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode Whether to check in blocking mode. If TRUE, wait until all
                   APs have finished; if FALSE, return immediately.

  @retval TRUE  Task has been finished by all APs.
  @retval FALSE Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}
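
//
// Note (added for clarity): WaitForAllAPsNotBusy() works because the Busy
// spin lock doubles as an "AP is running a procedure" flag. The lock is
// acquired by InternalSmmStartupThisAp()/InternalSmmStartupAllAPs() before a
// procedure is scheduled and released by the AP in APHandler() once the
// procedure returns, so a successful acquire-then-release here proves the AP
// is idle.
//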

/**
  Check whether it is a present AP.

  @param CpuIndex The AP index which calls this function.

  @retval TRUE  It's a present AP.
  @retval FALSE This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during executing the procedure.

  @param CpuIndex The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
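
//
// Note (added for clarity): a token's SpinLock is held for as long as any AP
// is still running the associated procedure. GetFreeToken() acquires the lock
// and sets RunningApCount; each AP decrements the count in ReleaseToken()
// when it finishes, and the last one releases the lock. IsApReady() can then
// report completion by simply probing the lock.
//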

/**
  Reset the FirstFreeToken pointer so the tokens in the maintained list can be reused.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param CpuIndex BSP processor Index
  @param SyncMode SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter       = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
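
//
// Note (added for clarity, a sketch of the protocol rather than a spec):
// BSPHandler() and APHandler() advance in lock step through the Run
// semaphores. Each ReleaseAllAPs() on the BSP side is consumed by one
// WaitForSemaphore (...CpuData[CpuIndex].Run) per AP, and each
// ReleaseSemaphore (...CpuData[BspIndex].Run) on an AP side is consumed by
// one count of the BSP's WaitForAllAPs(). Matching the release/wait pairs in
// the two functions (including the optional MTRR phases) gives the exit
// sequence: clear InsideSmm, drain pending work, optionally restore MTRRs
// together, reset per-CPU state, and finally leave SMM synchronously.
//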

/**
  SMI handler for AP.

  @param CpuIndex AP processor Index.
  @param ValidSmi Indicates that current SMI is a valid SMI or not.
  @param SyncMode SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != (UINT32)-1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded    = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }

  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte       = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }

  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        PageAddress += EFI_PAGE_SIZE;
      }

      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      //
      // 4K-page entries are already mapped. Just hide the first one anyway.
      //
      Pte     = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      //
      // Create 4K-page entries
      //
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0]      = PageAddress | mAddressEncMask; // Hide page 0 by leaving its Present bit clear
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index]   = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
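
//
// Note (added for clarity): the 5 pages allocated above form one PAE-style
// PDPTE page plus four page directory pages; 4 directories x 512 entries x
// 2MB large pages cover the full 4GB address space. When the stack guard is
// enabled, one extra 4KB page table is allocated per 2MB region that contains
// SMM stacks, so the guard page inside each stack can be mapped non-present.
//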

/**
  Checks whether the input token is a token currently in use.

  @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
                   BroadcastProcedure.

  @retval TRUE  The input token is in use.
  @retval FALSE The input token is not in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }

  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Allocate the SPIN_LOCKs and PROCEDURE_TOKENs separately because of the alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate new tokens and then return a free one.

  @param RunningApsCount The count of running APs for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }

  NewToken                       = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
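
//
// Note (added for clarity): token chunks are never freed. FirstFreeToken
// partitions TokenList into a used half and a free half: GetFreeToken()
// advances the pointer as tokens are handed out, IsTokenInUse() only scans
// entries before it, and ResetTokens() rewinds it to the list head when the
// BSP exits the SMI, recycling every token for the next SMI run.
//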

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
                   BroadcastProcedure.

  @retval EFI_SUCCESS   Specified AP has finished task assigned by StartupThisAP().
  @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure              The address of the procedure to run
  @param[in]     CpuIndex               Target CPU Index
  @param[in,out] ProcArguments          The parameter to pass to the procedure
  @param[in]     Token                  This is an optional parameter that allows the caller to execute the
                                        procedure in a blocking or non-blocking fashion. If it is NULL the
                                        call is blocking, and the call will not return until the AP has
                                        completed the procedure. If the token is not NULL, the call will
                                        return immediately. The caller can check whether the procedure has
                                        completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds  Indicates the time limit in microseconds for the APs to finish
                                        execution of Procedure, either for blocking or non-blocking mode.
                                        Zero means infinity. If the timeout expires before all APs return
                                        from Procedure, then Procedure on the failed APs is terminated. If
                                        the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                        If the timeout expires in non-blocking mode, the timeout can be
                                        determined through CheckOnProcedure or WaitForProcedure.
                                        Note that timeout support is optional. Whether an implementation
                                        supports this feature can be determined via the Attributes data
                                        member.
  @param[in,out] CpuStatus              This optional pointer may be used to get the status code returned
                                        by Procedure when it completes execution on the target AP, or with
                                        EFI_TIMEOUT if the Procedure fails to complete within the optional
                                        timeout. The implementation will update this variable with
                                        EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token for such case so the 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
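
//
// Illustrative call (hypothetical caller code, not part of this driver):
// run MyProcedure on CPU 2 in blocking mode by passing a NULL Token, so the
// call only returns after the AP has released its Busy lock:
//
//   Status = InternalSmmStartupThisAp (MyProcedure, 2, MyArgument, NULL, 0, NULL);
//
// MyProcedure and MyArgument are assumed names for the caller's
// EFI_AP_PROCEDURE2 function and its context pointer.
//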

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure             A pointer to the function to be run on
                                       enabled APs of the system.
  @param[in]     TimeoutInMicroseconds Indicates the time limit in microseconds for
                                       APs to return from Procedure, either for
                                       blocking or non-blocking mode.
  @param[in,out] ProcedureArguments    The parameter passed into Procedure for
                                       all APs.
  @param[in,out] Token                 This is an optional parameter that allows the caller to execute the
                                       procedure in a blocking or non-blocking fashion. If it is NULL the
                                       call is blocking, and the call will not return until the AP has
                                       completed the procedure. If the token is not NULL, the call will
                                       return immediately. The caller can check whether the procedure has
                                       completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus             This optional pointer may be used to get the status code returned
                                       by Procedure when it completes execution on the target AP, or with
                                       EFI_TIMEOUT if the Procedure fails to complete within the optional
                                       timeout. The implementation will update this variable with
                                       EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS In blocking mode, all APs have finished before
                      the timeout expired.
  @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
                      to all enabled APs.
  @retval others      Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each
  // present AP, AcquireSpinLock is used here instead of AcquireSpinLockOrFail; this
  // step is not expected to block.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add the below wrapper function to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]     Procedure     The address of the procedure to run
  @param[in]     CpuIndex      Target CPU Index
  @param[in,out] ProcArguments The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure         = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param Procedure     The address of the procedure to run
  @param CpuIndex      Target CPU Index
  @param ProcArguments The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
           0,
           NULL
           );
}
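
//
// Note (added for clarity): SmmStartupThisAp() picks its blocking behavior
// from PcdCpuSmmBlockStartupThisAp. When the PCD is TRUE it passes a NULL
// Token, so InternalSmmStartupThisAp() blocks on the AP's Busy lock until the
// procedure finishes. When the PCD is FALSE it passes &mSmmStartupThisApToken,
// a sentinel that requests non-blocking dispatch without allocating a
// completion token (see the comment in InternalSmmStartupThisAp() above).
//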

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  They are useful when you want hardware breakpoints to take effect in SMM without
  entering SMM mode to set them.

  NOTE: It might not be appropriate at runtime since it might
  conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
  conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when on-demand paging is used for memory above 4G.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP is already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr                                   = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;

  SemaphoreAddr                          = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
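
//
// Note (added for clarity): each semaphore and lock above is placed in its
// own SemaphoreSize-byte slot rather than being packed. GetSpinLockProperties()
// reports the platform's recommended spin-lock alignment (typically a cache
// line), so spacing the per-CPU Busy/Run/Present cells this way presumably
// avoids false sharing between processors that spin on adjacent cells.
//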

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks          Base address of SMI stack buffer for all processors.
  @param StackSize       Stack size for each processor in SMM.
  @param ShadowStackSize Shadow Stack size for each processor in SMM.

  @return The CR3 value (page table base address) to be used in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask  = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**
  Register the SMM Foundation entry point.

  @param This          Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint SMM Foundation EntryPoint

  @retval EFI_SUCCESS Successfully registered SMM foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**
  Register the startup procedure that is invoked on every SMI entry.

  @param[in]     Procedure          A pointer to the code stream to be run on the designated target AP
                                    of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                    with the related definitions of
                                    EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                    The caller may pass a value of NULL to deregister any existing
                                    startup procedure.
  @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
                                    run by the AP. It is an optional common mailbox between APs and
                                    the caller to share information.

  @retval EFI_SUCCESS           The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY         The SMM MP sync data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }

  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}