/** @file
SMM MP service implementation

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem IN:  32-bit unsigned integer
             OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  UINT32 Value;

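  //
  // Spin while the semaphore is zero, then decrement it atomically. The
  // compare-exchange retries whenever another processor modifies *Sem
  // between the read and the exchange.
  //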
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value - 1
           ) != Value);
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem IN:  32-bit unsigned integer
             OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  UINT32 Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
           ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock the semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem IN:  32-bit unsigned integer
             OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT volatile UINT32 *Sem
  )
{
  UINT32 Value;

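  //
  // Setting the semaphore to (UINT32)-1 blocks further check-ins:
  // ReleaseSemaphore() refuses to increment a semaphore whose value is
  // (UINT32)-1, so a late CPU sees a return value of 0 and quits.
  //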
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value, (UINT32)-1
           ) != Value);
  return Value;
}

/**
  Wait for all APs to perform an atomic compare exchange operation to
  release the semaphore.

  @param NumberOfAPs Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN UINTN NumberOfAPs
  )
{
  UINTN BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions CPU Arrival exception flags.

  @retval TRUE  If all CPUs have checked in.
  @retval FALSE If at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
  )
{
  UINTN                     Index;
  SMM_CPU_DATA_BLOCK        *CpuData;
  EFI_PROCESSOR_INFORMATION *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Checks whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE  The OS has enabled LMCE.
  @retval FALSE The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER         McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER     McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) was signaled.

  The status bit indicates (when set) that a local machine check exception
  was generated, meaning the current machine-check event was delivered to
  only this logical processor.

  @retval TRUE  LMCE was signaled.
  @retval FALSE LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering SMM,
  except for SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64  Timer;
  UINTN   Index;
  BOOLEAN LmceEn;
  BOOLEAN LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN UINTN CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode Whether to check in blocking or non-blocking mode.

  @retval TRUE  Task has been finished by all APs.
  @retval FALSE Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN BlockMode
  )
{
  UINTN Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

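    //
    // An AP holds its Busy lock while running a scheduled procedure, so
    // acquiring and immediately releasing Busy proves the AP is idle.
    //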
    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param CpuIndex The AP index which calls this function.

  @retval TRUE  It's a present AP.
  @retval FALSE This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used while executing the procedure.

  @param CpuIndex The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN CpuIndex
  )
{
  PROCEDURE_TOKEN *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

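  //
  // The last AP to finish releases the token's spin lock, which lets
  // IsApReady() succeed for callers polling this token.
  //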
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Free the tokens in the maintained list.

**/
VOID
FreeTokens (
  VOID
  )
{
  LIST_ENTRY      *Link;
  PROCEDURE_TOKEN *ProcToken;
  TOKEN_BUFFER    *TokenBuf;

  //
  // Only free the token buffers recorded in the OldTokenBufList
  // upon exiting SMI. The current token buffer stays allocated so
  // the next SMI doesn't need to re-allocate.
  //
  gSmmCpuPrivate->UsedTokenNum = 0;

  Link = GetFirstNode (&gSmmCpuPrivate->OldTokenBufList);
  while (!IsNull (&gSmmCpuPrivate->OldTokenBufList, Link)) {
    TokenBuf = TOKEN_BUFFER_FROM_LINK (Link);

    Link = RemoveEntryList (&TokenBuf->Link);

    FreePool (TokenBuf->Buffer);
    FreePool (TokenBuf);
  }

  while (!IsListEmpty (&gSmmCpuPrivate->TokenList)) {
    Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    RemoveEntryList (&ProcToken->Link);

    FreePool (ProcToken);
  }
}

/**
  SMI handler for BSP.

  @param CpuIndex BSP processor Index
  @param SyncMode SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN             CpuIndex,
  IN SMM_CPU_SYNC_MODE SyncMode
  )
{
  UINTN         Index;
  MTRR_SETTINGS Mtrrs;
  UINTN         ApCount;
  BOOLEAN       ClearTopLevelSmiResult;
  UINTN         PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
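    // PresentCount includes the BSP itself, so the wait below completes once
    // PresentCount exceeds ApCount, i.e. when every counted AP has set its
    // Present flag.
    //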
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Clean the tokens buffer.
  //
  FreeTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param CpuIndex AP processor Index.
  @param ValidSmi Indicates that current SMI is a valid SMI or not.
  @param SyncMode SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN             CpuIndex,
  IN BOOLEAN           ValidSmi,
  IN SMM_CPU_SYNC_MODE SyncMode
  )
{
  UINT64        Timer;
  UINTN         BspIndex;
  MTRR_SETTINGS Mtrrs;
  EFI_STATUS    ProcedureStatus;

  //
  // Wait for the BSP's signal, with timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

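  //
  // Dispatch loop: each Run release from the BSP either schedules a
  // procedure on this AP (via SmmStartupThisAp) or, once InsideSmm is
  // cleared, signals this AP to leave the loop and exit SMM.
  //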
  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create a 4G page table in SMRAM.

  @param[in] Is32BitPageTable Whether the page table is 32-bit PAE.
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN Is32BitPageTable
  )
{
  VOID   *PageTable;
  UINTN  Index;
  UINT64 *Pte;
  UINTN  PagesNeeded;
  UINTN  Low2MBoundary;
  UINTN  High2MBoundary;
  UINTN  Pages;
  UINTN  GuardPage;
  UINT64 *Pdpte;
  UINTN  PageIndex;
  UINTN  PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for the known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for the known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
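  //
  // 5 pages = 1 page of PDPTEs (only 4 entries used) + 4 page directories
  // covering 0-4GB with 2MB pages. The PagesNeeded extra page tables split
  // the 2MB regions containing stack guard pages into 4KB pages.
  //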
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  Pte = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

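  //
  // BIT1 in PcdNullPointerDetectionPropertyMask requests NULL pointer
  // detection in SMM, implemented by making page 0 non-present.
  //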
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0; the rest are mapped present below
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Checks whether the input token is the currently used token.

  @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
                   BroadcastProcedure.

  @retval TRUE  The input token is the currently used token.
  @retval FALSE The input token is not the currently used token.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK *Token
  )
{
  LIST_ENTRY      *Link;
  PROCEDURE_TOKEN *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Create a token and save it to the maintained list.

  @param RunningApCount Input running AP count.

  @return Pointer to the created procedure token.

**/
PROCEDURE_TOKEN *
CreateToken (
  IN UINT32 RunningApCount
  )
{
  PROCEDURE_TOKEN *ProcToken;
  SPIN_LOCK       *SpinLock;
  UINTN           SpinLockSize;
  TOKEN_BUFFER    *TokenBuf;
  UINT32          TokenCountPerChunk;

  SpinLockSize = GetSpinLockProperties ();
  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);

  if (gSmmCpuPrivate->UsedTokenNum == TokenCountPerChunk) {
    DEBUG ((DEBUG_VERBOSE, "CpuSmm: No free token buffer, allocate new buffer!\n"));

    //
    // Record the exhausted token buffer for the later free action.
    // The newly allocated current token buffer is not in this list.
    //
    TokenBuf = AllocatePool (sizeof (TOKEN_BUFFER));
    ASSERT (TokenBuf != NULL);
    TokenBuf->Signature = TOKEN_BUFFER_SIGNATURE;
    TokenBuf->Buffer = gSmmCpuPrivate->CurrentTokenBuf;

    InsertTailList (&gSmmCpuPrivate->OldTokenBufList, &TokenBuf->Link);

    gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
    ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);
    gSmmCpuPrivate->UsedTokenNum = 0;
  }

  SpinLock = (SPIN_LOCK *)(gSmmCpuPrivate->CurrentTokenBuf + SpinLockSize * gSmmCpuPrivate->UsedTokenNum);
  gSmmCpuPrivate->UsedTokenNum++;

  InitializeSpinLock (SpinLock);
  AcquireSpinLock (SpinLock);

  ProcToken = AllocatePool (sizeof (PROCEDURE_TOKEN));
  ASSERT (ProcToken != NULL);
  ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
  ProcToken->SpinLock = SpinLock;
  ProcToken->RunningApCount = RunningApCount;

  InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);

  return ProcToken;
}

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
                   BroadcastProcedure.

  @retval EFI_SUCCESS   Specified AP has finished the task assigned by StartupThisAP().
  @retval EFI_NOT_READY Specified AP has not finished the task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure             The address of the procedure to run
  @param[in]     CpuIndex              Target CPU Index
  @param[in,out] ProcArguments         The parameter to pass to the procedure
  @param[in]     Token                 This is an optional parameter that allows the caller to execute the
                                       procedure in a blocking or non-blocking fashion. If it is NULL the
                                       call is blocking, and the call will not return until the AP has
                                       completed the procedure. If the token is not NULL, the call will
                                       return immediately. The caller can check whether the procedure has
                                       completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
                                       execution of Procedure, either for blocking or non-blocking mode.
                                       Zero means infinity. If the timeout expires before all APs return
                                       from Procedure, then Procedure on the failed APs is terminated. If
                                       the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                       If the timeout expires in non-blocking mode, the timeout can be
                                       determined through CheckOnProcedure or WaitForProcedure.
                                       Note that timeout support is optional. Whether an implementation
                                       supports this feature can be determined via the Attributes data
                                       member.
  @param[in,out] CpuStatus             This optional pointer may be used to get the status code returned
                                       by Procedure when it completes execution on the target AP, or with
                                       EFI_TIMEOUT if the Procedure fails to complete within the optional
                                       timeout. The implementation will update this variable with
                                       EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN     EFI_AP_PROCEDURE2 Procedure,
  IN     UINTN             CpuIndex,
  IN OUT VOID              *ProcArguments OPTIONAL,
  IN     MM_COMPLETION     *Token,
  IN     UINTN             TimeoutInMicroseconds,
  IN OUT EFI_STATUS        *CpuStatus
  )
{
  PROCEDURE_TOKEN *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = CreateToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
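
//
// Illustrative non-blocking usage sketch (MyApProcedure and MyArgs are
// hypothetical, not part of this driver):
//
//   MM_COMPLETION Token;
//   EFI_STATUS    ApStatus;
//   EFI_STATUS    Status;
//
//   Status = InternalSmmStartupThisAp (MyApProcedure, CpuIndex, MyArgs, &Token, 0, &ApStatus);
//   while (!EFI_ERROR (Status) && IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();  // ReleaseToken () releases the token's spin lock when the AP finishes
//   }
//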

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure             A pointer to the function to be run on
                                       enabled APs of the system.
  @param[in]     TimeoutInMicroseconds Indicates the time limit in microseconds for
                                       APs to return from Procedure, either for
                                       blocking or non-blocking mode.
  @param[in,out] ProcedureArguments    The parameter passed into Procedure for
                                       all APs.
  @param[in,out] Token                 This is an optional parameter that allows the caller to execute the
                                       procedure in a blocking or non-blocking fashion. If it is NULL the
                                       call is blocking, and the call will not return until the AP has
                                       completed the procedure. If the token is not NULL, the call will
                                       return immediately. The caller can check whether the procedure has
                                       completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus             This optional pointer may be used to get the status code returned
                                       by Procedure when it completes execution on the target AP, or with
                                       EFI_TIMEOUT if the Procedure fails to complete within the optional
                                       timeout. The implementation will update this variable with
                                       EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS In blocking mode, all APs have finished before
                      the timeout expired.
  @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
                      to all enabled APs.
  @retval others      Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN     EFI_AP_PROCEDURE2 Procedure,
  IN     UINTN             TimeoutInMicroseconds,
  IN OUT VOID              *ProcedureArguments OPTIONAL,
  IN OUT MM_COMPLETION     *Token,
  IN OUT EFI_STATUS        *CPUStatus
  )
{
  UINTN           Index;
  UINTN           CpuCount;
  PROCEDURE_TOKEN *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = CreateToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each
  // present AP, AcquireSpinLock is used here instead of AcquireSpinLockOrFail so the
  // acquisition blocks instead of failing.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So the wrapper function below converts between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.

  @retval EFI_SUCCESS The wrapped procedure has been executed.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID *Buffer
  )
{
  PROCEDURE_WRAPPER *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]     Procedure     The address of the procedure to run
  @param[in]     CpuIndex      Target CPU Index
  @param[in,out] ProcArguments The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN     EFI_AP_PROCEDURE Procedure,
  IN     UINTN            CpuIndex,
  IN OUT VOID             *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param Procedure     The address of the procedure to run
  @param CpuIndex      Target CPU Index
  @param ProcArguments The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER CpuNumber not valid
  @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS           The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN     EFI_AP_PROCEDURE Procedure,
  IN     UINTN            CpuIndex,
  IN OUT VOID             *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION Token;

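  //
  // This call may return before the AP finishes, so the wrapper context is
  // kept in the per-CPU ApWrapperFunc array instead of on this stack frame
  // (contrast with SmmBlockingStartupThisAp (), which can use a local).
  //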
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  This is useful for enabling hardware breakpoints in SMM without having to set them from
  within SMM.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN CpuIndex
  )
{
  EFI_STATUS Status;
  BOOLEAN    ValidSmi;
  BOOLEAN    IsBsp;
  BOOLEAN    BspInProgress;
  UINTN      Index;
  UINTN      Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when using on-demand paging for above-4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means that when we sampled the ValidSmi flag, either SMI status
    // had not been cleared by the BSP in a new SMI run (so we have a truly invalid SMI), or
    // SMI status had been cleared by the BSP and an existing SMI run has almost ended. (Note
    // we sampled the ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after the AP's Present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffers for the spin-lock token chunk and the AP wrapper functions.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  UINTN  SpinLockSize;
  UINT32 TokenCountPerChunk;

  SpinLockSize = GetSpinLockProperties ();
  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);

  gSmmCpuPrivate->UsedTokenNum = 0;

  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);
  InitializeListHead (&gSmmCpuPrivate->OldTokenBufList);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN ProcessorCount;
  UINTN TotalSize;
  UINTN GlobalSemaphoresSize;
  UINTN CpuSemaphoresSize;
  UINTN SemaphoreSize;
  UINTN Pages;
  UINTN *SemaphoreBlock;
  UINTN SemaphoreAddr;

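  //
  // GetSpinLockProperties () returns the required size/alignment of one
  // spin lock (typically a cache line), so each semaphore below gets its
  // own slot and processors do not contend on the same cache line.
  //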
  SemaphoreSize = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                            = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks          Base address of SMI stack buffer for all processors.
  @param StackSize       Stack size for each processor in SMM.
  @param ShadowStackSize Shadow stack size for each processor in SMM.

  @return The CR3 value (page table base address) for SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID  *Stacks,
  IN UINTN StackSize,
  IN UINTN ShadowStackSize
  )
{
  UINT32                 Cr3;
  UINTN                  Index;
  UINT8                  *GdtTssTables;
  UINTN                  GdtTableStepSize;
  CPUID_VERSION_INFO_EDX RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
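  // CPUID leaf 0x80000008 reports the physical address width in EAX[7:0].
  // The derived mask is clamped to 48 bits (the 4-level paging limit) and
  // its low 12 bits are cleared so it can be applied to page table entries.
  //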
  AsmCpuid (0x80000008, (UINT32 *)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param This          Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint SMM Foundation EntryPoint

  @retval EFI_SUCCESS The SMM Foundation entry point is successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
  IN       EFI_SMM_ENTRY_POINT            SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure that is invoked on each processor upon SMI entry.

  @param[in]     Procedure          A pointer to the code stream to be run on the designated target AP
                                    of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                    with the related definitions of
                                    EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                    The caller may pass a value of NULL to deregister any existing
                                    startup procedure.
  @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
                                    run by the AP. It is an optional common mailbox between APs and
                                    the caller to share information.

  @retval EFI_SUCCESS           The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY         The SMM MP sync data has not been initialized.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE Procedure,
  IN OUT VOID             *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}