]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
4bcd217917d7f42b8b834bae6dd5fddab02e0ee4
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25
26 /**
27 Performs an atomic compare exchange operation to get semaphore.
28 The compare exchange operation must be performed using
29 MP safe mechanisms.
30
31 @param Sem IN: 32-bit unsigned integer
32 OUT: original integer - 1
33 @return Original integer - 1
34
35 **/
36 UINT32
37 WaitForSemaphore (
38 IN OUT volatile UINT32 *Sem
39 )
40 {
41 UINT32 Value;
42
43 for (;;) {
44 Value = *Sem;
45 if (Value != 0 &&
46 InterlockedCompareExchange32 (
47 (UINT32*)Sem,
48 Value,
49 Value - 1
50 ) == Value) {
51 break;
52 }
53 CpuPause ();
54 }
55 return Value - 1;
56 }
57
58
59 /**
60 Performs an atomic compare exchange operation to release semaphore.
61 The compare exchange operation must be performed using
62 MP safe mechanisms.
63
64 @param Sem IN: 32-bit unsigned integer
65 OUT: original integer + 1
66 @return Original integer + 1
67
68 **/
69 UINT32
70 ReleaseSemaphore (
71 IN OUT volatile UINT32 *Sem
72 )
73 {
74 UINT32 Value;
75
76 do {
77 Value = *Sem;
78 } while (Value + 1 != 0 &&
79 InterlockedCompareExchange32 (
80 (UINT32*)Sem,
81 Value,
82 Value + 1
83 ) != Value);
84 return Value + 1;
85 }
86
87 /**
88 Performs an atomic compare exchange operation to lock semaphore.
89 The compare exchange operation must be performed using
90 MP safe mechanisms.
91
92 @param Sem IN: 32-bit unsigned integer
93 OUT: -1
94 @return Original integer
95
96 **/
97 UINT32
98 LockdownSemaphore (
99 IN OUT volatile UINT32 *Sem
100 )
101 {
102 UINT32 Value;
103
104 do {
105 Value = *Sem;
106 } while (InterlockedCompareExchange32 (
107 (UINT32*)Sem,
108 Value, (UINT32)-1
109 ) != Value);
110 return Value;
111 }
112
113 /**
114 Wait all APs to performs an atomic compare exchange operation to release semaphore.
115
116 @param NumberOfAPs AP number
117
118 **/
119 VOID
120 WaitForAllAPs (
121 IN UINTN NumberOfAPs
122 )
123 {
124 UINTN BspIndex;
125
126 BspIndex = mSmmMpSyncData->BspIndex;
127 while (NumberOfAPs-- > 0) {
128 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
129 }
130 }
131
132 /**
133 Performs an atomic compare exchange operation to release semaphore
134 for each AP.
135
136 **/
137 VOID
138 ReleaseAllAPs (
139 VOID
140 )
141 {
142 UINTN Index;
143
144 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
145 if (IsPresentAp (Index)) {
146 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
147 }
148 }
149 }
150
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs the have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Fast path: the arrival counter already accounts for every CPU.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Only inspect populated slots (valid APIC ID) whose processor has not
    // yet flagged its presence in SMM.
    //
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      //
      // A missing CPU is excused when the caller tolerates its state
      // (delayed / blocked / SMI-disabled), as reported by the
      // SmmCpuFeatures save-state registers.
      //
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      //
      // A non-excused CPU is still outside SMM.
      //
      return FALSE;
    }
  }


  return TRUE;
}
195
196 /**
197 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
198
199 @retval TRUE Os enable lmce.
200 @retval FALSE Os not enable lmce.
201
202 **/
203 BOOLEAN
204 IsLmceOsEnabled (
205 VOID
206 )
207 {
208 MSR_IA32_MCG_CAP_REGISTER McgCap;
209 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
210 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
211
212 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
213 if (McgCap.Bits.MCG_LMCE_P == 0) {
214 return FALSE;
215 }
216
217 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
218 if (FeatureCtrl.Bits.LmceOn == 0) {
219 return FALSE;
220 }
221
222 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
223 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
224 }
225
226 /**
227 Return if Local machine check exception signaled.
228
229 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
230 delivered to only the logical processor.
231
232 @retval TRUE LMCE was signaled.
233 @retval FALSE LMCE was not signaled.
234
235 **/
236 BOOLEAN
237 IsLmceSignaled (
238 VOID
239 )
240 {
241 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
242
243 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
244 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
245 }
246
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

  Runs up to two sync-timer rounds: a passive wait first, then (if some CPUs
  are still outside) an SMI IPI broadcast to the stragglers followed by a
  second wait. If LMCE was signaled and enabled by the OS, the first wait is
  skipped so the machine-check can be handled promptly.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;
  BOOLEAN                           LmceEn;
  BOOLEAN                           LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Only consult the LMCE MSRs when the platform reported machine-check
  // support; otherwise both flags stay FALSE and do not short-circuit the
  // first timeout round below.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}
331
332
333 /**
334 Replace OS MTRR's with SMI MTRR's.
335
336 @param CpuIndex Processor Index
337
338 **/
339 VOID
340 ReplaceOSMtrrs (
341 IN UINTN CpuIndex
342 )
343 {
344 SmmCpuFeaturesDisableSmrr ();
345
346 //
347 // Replace all MTRRs registers
348 //
349 MtrrSetAllMtrrs (&gSmiMtrrs);
350 }
351
352 /**
353 Wheck whether task has been finished by all APs.
354
355 @param BlockMode Whether did it in block mode or non-block mode.
356
357 @retval TRUE Task has been finished by all APs.
358 @retval FALSE Task not has been finished by all APs.
359
360 **/
361 BOOLEAN
362 WaitForAllAPsNotBusy (
363 IN BOOLEAN BlockMode
364 )
365 {
366 UINTN Index;
367
368 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
369 //
370 // Ignore BSP and APs which not call in SMM.
371 //
372 if (!IsPresentAp(Index)) {
373 continue;
374 }
375
376 if (BlockMode) {
377 AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
378 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
379 } else {
380 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
381 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
382 } else {
383 return FALSE;
384 }
385 }
386 }
387
388 return TRUE;
389 }
390
391 /**
392 Check whether it is an present AP.
393
394 @param CpuIndex The AP index which calls this function.
395
396 @retval TRUE It's a present AP.
397 @retval TRUE This is not an AP or it is not present.
398
399 **/
400 BOOLEAN
401 IsPresentAp (
402 IN UINTN CpuIndex
403 )
404 {
405 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
406 *(mSmmMpSyncData->CpuData[CpuIndex].Present));
407 }
408
409 /**
410 Clean up the status flags used during executing the procedure.
411
412 @param CpuIndex The AP index which calls this function.
413
414 **/
415 VOID
416 ReleaseToken (
417 IN UINTN CpuIndex
418 )
419 {
420 PROCEDURE_TOKEN *Token;
421
422 Token = mSmmMpSyncData->CpuData[CpuIndex].Token;
423
424 if (InterlockedDecrement (&Token->RunningApCount) == 0) {
425 ReleaseSpinLock (Token->SpinLock);
426 }
427
428 mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
429 }
430
431 /**
432 Free the tokens in the maintained list.
433
434 **/
435 VOID
436 ResetTokens (
437 VOID
438 )
439 {
440 //
441 // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
442 //
443 gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
444 }
445
/**
  SMI handler for BSP.

  Orchestrates one SMI run on the BSP: gathers APs (depending on sync mode),
  optionally coordinates MTRR save/program/restore in lock-step with all APs,
  invokes the SMM Foundation entry point, and finally walks every processor
  through a synchronized exit sequence before re-opening the arrival counter.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    // (Counter includes the BSP, hence the "- 1".)
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    // (spin until the BSP plus ApCount APs are all flagged; PresentCount
    // includes the BSP, so "> ApCount" means every counted AP is present).
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
684
/**
  SMI handler for AP.

  Mirrors the BSP-side rendezvous in BSPHandler(): waits for the BSP to enter
  SMM (sending it an SMI IPI after a first timeout if its index is known),
  optionally participates in the lock-step MTRR save/program/restore
  sequence, services scheduled procedures until the BSP signals exit, and
  then walks the synchronized teardown handshake.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;
  EFI_STATUS                        ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        // (decrement the arrival counter so this AP is no longer counted).
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    //
    // A non-NULL token means the caller started this procedure
    // asynchronously; release our share of it.
    //
    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}
886
/**
  Create 4G PageTable in SMRAM.

  Builds a PAE-style identity map of the low 4 GiB using 2 MiB large pages:
  one PDPT page followed by four page-directory pages. When the SMM stack
  guard is enabled, the 2 MiB regions covering the SMM stacks are broken
  into 4 KiB pages so each stack's guard page can be marked non-present.
  Optionally hides page 0 for NULL-pointer detection.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    //
    // One extra 4K page table page per 2MB region that must be split.
    //
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  // (5 pages = 1 PDPT + 4 page directories; plus the split pages above.)
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  // (2MB large pages: entry Index maps physical address Index << 21.)
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      //
      // Point the covering PDE at a fresh 4K page table instead of a 2MB
      // large page (bits 30..31 select the PDPTE, bits 21..29 the PDE).
      //
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
1012
1013 /**
1014 Checks whether the input token is the current used token.
1015
1016 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1017 BroadcastProcedure.
1018
1019 @retval TRUE The input token is the current used token.
1020 @retval FALSE The input token is not the current used token.
1021 **/
1022 BOOLEAN
1023 IsTokenInUse (
1024 IN SPIN_LOCK *Token
1025 )
1026 {
1027 LIST_ENTRY *Link;
1028 PROCEDURE_TOKEN *ProcToken;
1029
1030 if (Token == NULL) {
1031 return FALSE;
1032 }
1033
1034 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
1035 //
1036 // Only search used tokens.
1037 //
1038 while (Link != gSmmCpuPrivate->FirstFreeToken) {
1039 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
1040
1041 if (ProcToken->SpinLock == Token) {
1042 return TRUE;
1043 }
1044
1045 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
1046 }
1047
1048 return FALSE;
1049 }
1050
1051 /**
1052 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
1053
1054 @return First token of the token buffer.
1055 **/
1056 LIST_ENTRY *
1057 AllocateTokenBuffer (
1058 VOID
1059 )
1060 {
1061 UINTN SpinLockSize;
1062 UINT32 TokenCountPerChunk;
1063 UINTN Index;
1064 SPIN_LOCK *SpinLock;
1065 UINT8 *SpinLockBuffer;
1066 PROCEDURE_TOKEN *ProcTokens;
1067
1068 SpinLockSize = GetSpinLockProperties ();
1069
1070 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
1071 ASSERT (TokenCountPerChunk != 0);
1072 if (TokenCountPerChunk == 0) {
1073 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
1074 CpuDeadLoop ();
1075 }
1076 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
1077
1078 //
1079 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
1080 //
1081 SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
1082 ASSERT (SpinLockBuffer != NULL);
1083
1084 ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
1085 ASSERT (ProcTokens != NULL);
1086
1087 for (Index = 0; Index < TokenCountPerChunk; Index++) {
1088 SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
1089 InitializeSpinLock (SpinLock);
1090
1091 ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
1092 ProcTokens[Index].SpinLock = SpinLock;
1093 ProcTokens[Index].RunningApCount = 0;
1094
1095 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
1096 }
1097
1098 return &ProcTokens[0].Link;
1099 }
1100
1101 /**
1102 Get the free token.
1103
1104 If no free token, allocate new tokens then return the free one.
1105
1106 @param RunningApsCount The Running Aps count for this token.
1107
1108 @retval return the first free PROCEDURE_TOKEN.
1109
1110 **/
1111 PROCEDURE_TOKEN *
1112 GetFreeToken (
1113 IN UINT32 RunningApsCount
1114 )
1115 {
1116 PROCEDURE_TOKEN *NewToken;
1117
1118 //
1119 // If FirstFreeToken meets the end of token list, enlarge the token list.
1120 // Set FirstFreeToken to the first free token.
1121 //
1122 if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
1123 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1124 }
1125 NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
1126 gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);
1127
1128 NewToken->RunningApCount = RunningApsCount;
1129 AcquireSpinLock (NewToken->SpinLock);
1130
1131 return NewToken;
1132 }
1133
1134 /**
1135 Checks status of specified AP.
1136
1137 This function checks whether the specified AP has finished the task assigned
1138 by StartupThisAP(), and whether timeout expires.
1139
1140 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1141 BroadcastProcedure.
1142
1143 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1144 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1145 **/
1146 EFI_STATUS
1147 IsApReady (
1148 IN SPIN_LOCK *Token
1149 )
1150 {
1151 if (AcquireSpinLockOrFail (Token)) {
1152 ReleaseSpinLock (Token);
1153 return EFI_SUCCESS;
1154 }
1155
1156 return EFI_NOT_READY;
1157 }
1158
1159 /**
1160 Schedule a procedure to run on the specified CPU.
1161
1162 @param[in] Procedure The address of the procedure to run
1163 @param[in] CpuIndex Target CPU Index
1164 @param[in,out] ProcArguments The parameter to pass to the procedure
1165 @param[in] Token This is an optional parameter that allows the caller to execute the
1166 procedure in a blocking or non-blocking fashion. If it is NULL the
1167 call is blocking, and the call will not return until the AP has
1168 completed the procedure. If the token is not NULL, the call will
1169 return immediately. The caller can check whether the procedure has
1170 completed with CheckOnProcedure or WaitForProcedure.
1171 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1172 execution of Procedure, either for blocking or non-blocking mode.
1173 Zero means infinity. If the timeout expires before all APs return
1174 from Procedure, then Procedure on the failed APs is terminated. If
1175 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1176 If the timeout expires in non-blocking mode, the timeout determined
1177 can be through CheckOnProcedure or WaitForProcedure.
1178 Note that timeout support is optional. Whether an implementation
1179 supports this feature can be determined via the Attributes data
1180 member.
1181 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1182 by Procedure when it completes execution on the target AP, or with
1183 EFI_TIMEOUT if the Procedure fails to complete within the optional
1184 timeout. The implementation will update this variable with
1185 EFI_NOT_READY prior to starting Procedure on the target AP.
1186
1187 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1188 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1189 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1190 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1191 @retval EFI_SUCCESS The procedure has been successfully scheduled
1192
1193 **/
1194 EFI_STATUS
1195 InternalSmmStartupThisAp (
1196 IN EFI_AP_PROCEDURE2 Procedure,
1197 IN UINTN CpuIndex,
1198 IN OUT VOID *ProcArguments OPTIONAL,
1199 IN MM_COMPLETION *Token,
1200 IN UINTN TimeoutInMicroseconds,
1201 IN OUT EFI_STATUS *CpuStatus
1202 )
1203 {
1204 PROCEDURE_TOKEN *ProcToken;
1205
1206 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
1207 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
1208 return EFI_INVALID_PARAMETER;
1209 }
1210 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1211 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
1212 return EFI_INVALID_PARAMETER;
1213 }
1214 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
1215 return EFI_INVALID_PARAMETER;
1216 }
1217 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
1218 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
1219 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
1220 }
1221 return EFI_INVALID_PARAMETER;
1222 }
1223 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
1224 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
1225 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
1226 }
1227 return EFI_INVALID_PARAMETER;
1228 }
1229 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1230 return EFI_INVALID_PARAMETER;
1231 }
1232 if (Procedure == NULL) {
1233 return EFI_INVALID_PARAMETER;
1234 }
1235
1236 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1237
1238 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
1239 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
1240 if (Token != NULL) {
1241 ProcToken= GetFreeToken (1);
1242 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
1243 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1244 }
1245 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
1246 if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
1247 *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
1248 }
1249
1250 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
1251
1252 if (Token == NULL) {
1253 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1254 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1255 }
1256
1257 return EFI_SUCCESS;
1258 }
1259
1260 /**
1261 Worker function to execute a caller provided function on all enabled APs.
1262
1263 @param[in] Procedure A pointer to the function to be run on
1264 enabled APs of the system.
1265 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
1266 APs to return from Procedure, either for
1267 blocking or non-blocking mode.
1268 @param[in,out] ProcedureArguments The parameter passed into Procedure for
1269 all APs.
1270 @param[in,out] Token This is an optional parameter that allows the caller to execute the
1271 procedure in a blocking or non-blocking fashion. If it is NULL the
1272 call is blocking, and the call will not return until the AP has
1273 completed the procedure. If the token is not NULL, the call will
1274 return immediately. The caller can check whether the procedure has
1275 completed with CheckOnProcedure or WaitForProcedure.
1276 @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
1277 by Procedure when it completes execution on the target AP, or with
1278 EFI_TIMEOUT if the Procedure fails to complete within the optional
1279 timeout. The implementation will update this variable with
1280 EFI_NOT_READY prior to starting Procedure on the target AP.
1281
1282
1283 @retval EFI_SUCCESS In blocking mode, all APs have finished before
1284 the timeout expired.
1285 @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
1286 to all enabled APs.
1287 @retval others Failed to Startup all APs.
1288
1289 **/
1290 EFI_STATUS
1291 InternalSmmStartupAllAPs (
1292 IN EFI_AP_PROCEDURE2 Procedure,
1293 IN UINTN TimeoutInMicroseconds,
1294 IN OUT VOID *ProcedureArguments OPTIONAL,
1295 IN OUT MM_COMPLETION *Token,
1296 IN OUT EFI_STATUS *CPUStatus
1297 )
1298 {
1299 UINTN Index;
1300 UINTN CpuCount;
1301 PROCEDURE_TOKEN *ProcToken;
1302
1303 if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
1304 return EFI_INVALID_PARAMETER;
1305 }
1306 if (Procedure == NULL) {
1307 return EFI_INVALID_PARAMETER;
1308 }
1309
1310 CpuCount = 0;
1311 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1312 if (IsPresentAp (Index)) {
1313 CpuCount ++;
1314
1315 if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
1316 return EFI_INVALID_PARAMETER;
1317 }
1318
1319 if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
1320 return EFI_NOT_READY;
1321 }
1322 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1323 }
1324 }
1325 if (CpuCount == 0) {
1326 return EFI_NOT_STARTED;
1327 }
1328
1329 if (Token != NULL) {
1330 ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
1331 *Token = (MM_COMPLETION)ProcToken->SpinLock;
1332 } else {
1333 ProcToken = NULL;
1334 }
1335
1336 //
1337 // Make sure all BUSY should be acquired.
1338 //
1339 // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
1340 // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
1341 // block mode.
1342 //
1343 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1344 if (IsPresentAp (Index)) {
1345 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
1346 }
1347 }
1348
1349 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1350 if (IsPresentAp (Index)) {
1351 mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
1352 mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
1353 if (ProcToken != NULL) {
1354 mSmmMpSyncData->CpuData[Index].Token = ProcToken;
1355 }
1356 if (CPUStatus != NULL) {
1357 mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
1358 if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
1359 *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
1360 }
1361 }
1362 } else {
1363 //
1364 // PI spec requirement:
1365 // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
1366 //
1367 if (CPUStatus != NULL) {
1368 CPUStatus[Index] = EFI_NOT_STARTED;
1369 }
1370
1371 //
1372 // Decrease the count to mark this processor(AP or BSP) as finished.
1373 //
1374 if (ProcToken != NULL) {
1375 WaitForSemaphore (&ProcToken->RunningApCount);
1376 }
1377 }
1378 }
1379
1380 ReleaseAllAPs ();
1381
1382 if (Token == NULL) {
1383 //
1384 // Make sure all APs have completed their tasks.
1385 //
1386 WaitForAllAPsNotBusy (TRUE);
1387 }
1388
1389 return EFI_SUCCESS;
1390 }
1391
1392 /**
1393 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1394 If the function is defined with a type that is not compatible with
1395 the type (of the expression) pointed to by the expression that
1396 denotes the called function, the behavior is undefined.
1397
1398 So add below wrapper function to convert between EFI_AP_PROCEDURE
1399 and EFI_AP_PROCEDURE2.
1400
1401 Wrapper for Procedures.
1402
1403 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1404
1405 **/
1406 EFI_STATUS
1407 EFIAPI
1408 ProcedureWrapper (
1409 IN VOID *Buffer
1410 )
1411 {
1412 PROCEDURE_WRAPPER *Wrapper;
1413
1414 Wrapper = Buffer;
1415 Wrapper->Procedure (Wrapper->ProcedureArgument);
1416
1417 return EFI_SUCCESS;
1418 }
1419
1420 /**
1421 Schedule a procedure to run on the specified CPU in blocking mode.
1422
1423 @param[in] Procedure The address of the procedure to run
1424 @param[in] CpuIndex Target CPU Index
1425 @param[in, out] ProcArguments The parameter to pass to the procedure
1426
1427 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1428 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1429 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1430 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1431 @retval EFI_SUCCESS The procedure has been successfully scheduled
1432
1433 **/
1434 EFI_STATUS
1435 EFIAPI
1436 SmmBlockingStartupThisAp (
1437 IN EFI_AP_PROCEDURE Procedure,
1438 IN UINTN CpuIndex,
1439 IN OUT VOID *ProcArguments OPTIONAL
1440 )
1441 {
1442 PROCEDURE_WRAPPER Wrapper;
1443
1444 Wrapper.Procedure = Procedure;
1445 Wrapper.ProcedureArgument = ProcArguments;
1446
1447 //
1448 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1449 //
1450 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1451 }
1452
1453 /**
1454 Schedule a procedure to run on the specified CPU.
1455
1456 @param Procedure The address of the procedure to run
1457 @param CpuIndex Target CPU Index
1458 @param ProcArguments The parameter to pass to the procedure
1459
1460 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1461 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1462 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1463 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1464 @retval EFI_SUCCESS The procedure has been successfully scheduled
1465
1466 **/
1467 EFI_STATUS
1468 EFIAPI
1469 SmmStartupThisAp (
1470 IN EFI_AP_PROCEDURE Procedure,
1471 IN UINTN CpuIndex,
1472 IN OUT VOID *ProcArguments OPTIONAL
1473 )
1474 {
1475 MM_COMPLETION Token;
1476
1477 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1478 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1479
1480 //
1481 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1482 //
1483 return InternalSmmStartupThisAp (
1484 ProcedureWrapper,
1485 CpuIndex,
1486 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1487 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
1488 0,
1489 NULL
1490 );
1491 }
1492
1493 /**
1494 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1495 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1496
1497 NOTE: It might not be appreciated in runtime since it might
1498 conflict with OS debugging facilities. Turn them off in RELEASE.
1499
1500 @param CpuIndex CPU Index
1501
1502 **/
1503 VOID
1504 EFIAPI
1505 CpuSmmDebugEntry (
1506 IN UINTN CpuIndex
1507 )
1508 {
1509 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1510
1511 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1512 ASSERT(CpuIndex < mMaxNumberOfCpus);
1513 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1514 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1515 AsmWriteDr6 (CpuSaveState->x86._DR6);
1516 AsmWriteDr7 (CpuSaveState->x86._DR7);
1517 } else {
1518 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1519 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1520 }
1521 }
1522 }
1523
1524 /**
1525 This function restores DR6 & DR7 to SMM save state.
1526
1527 NOTE: It might not be appreciated in runtime since it might
1528 conflict with OS debugging facilities. Turn them off in RELEASE.
1529
1530 @param CpuIndex CPU Index
1531
1532 **/
1533 VOID
1534 EFIAPI
1535 CpuSmmDebugExit (
1536 IN UINTN CpuIndex
1537 )
1538 {
1539 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1540
1541 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1542 ASSERT(CpuIndex < mMaxNumberOfCpus);
1543 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1544 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1545 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1546 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1547 } else {
1548 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1549 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1550 }
1551 }
1552 }
1553
1554 /**
1555 C function for SMI entry, each processor comes here upon SMI trigger.
1556
1557 @param CpuIndex CPU Index
1558
1559 **/
1560 VOID
1561 EFIAPI
1562 SmiRendezvous (
1563 IN UINTN CpuIndex
1564 )
1565 {
1566 EFI_STATUS Status;
1567 BOOLEAN ValidSmi;
1568 BOOLEAN IsBsp;
1569 BOOLEAN BspInProgress;
1570 UINTN Index;
1571 UINTN Cr2;
1572
1573 ASSERT(CpuIndex < mMaxNumberOfCpus);
1574
1575 //
1576 // Save Cr2 because Page Fault exception in SMM may override its value,
1577 // when using on-demand paging for above 4G memory.
1578 //
1579 Cr2 = 0;
1580 SaveCr2 (&Cr2);
1581
1582 //
1583 // Call the user register Startup function first.
1584 //
1585 if (mSmmMpSyncData->StartupProcedure != NULL) {
1586 mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
1587 }
1588
1589 //
1590 // Perform CPU specific entry hooks
1591 //
1592 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1593
1594 //
1595 // Determine if this is a valid SMI
1596 //
1597 ValidSmi = PlatformValidSmi();
1598
1599 //
1600 // Determine if BSP has been already in progress. Note this must be checked after
1601 // ValidSmi because BSP may clear a valid SMI source after checking in.
1602 //
1603 BspInProgress = *mSmmMpSyncData->InsideSmm;
1604
1605 if (!BspInProgress && !ValidSmi) {
1606 //
1607 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1608 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1609 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1610 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1611 // is nothing we need to do.
1612 //
1613 goto Exit;
1614 } else {
1615 //
1616 // Signal presence of this processor
1617 //
1618 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1619 //
1620 // BSP has already ended the synchronization, so QUIT!!!
1621 //
1622
1623 //
1624 // Wait for BSP's signal to finish SMI
1625 //
1626 while (*mSmmMpSyncData->AllCpusInSync) {
1627 CpuPause ();
1628 }
1629 goto Exit;
1630 } else {
1631
1632 //
1633 // The BUSY lock is initialized to Released state.
1634 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1635 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1636 // after AP's present flag is detected.
1637 //
1638 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1639 }
1640
1641 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1642 ActivateSmmProfile (CpuIndex);
1643 }
1644
1645 if (BspInProgress) {
1646 //
1647 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1648 // as BSP may have cleared the SMI status
1649 //
1650 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1651 } else {
1652 //
1653 // We have a valid SMI
1654 //
1655
1656 //
1657 // Elect BSP
1658 //
1659 IsBsp = FALSE;
1660 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1661 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1662 //
1663 // Call platform hook to do BSP election
1664 //
1665 Status = PlatformSmmBspElection (&IsBsp);
1666 if (EFI_SUCCESS == Status) {
1667 //
1668 // Platform hook determines successfully
1669 //
1670 if (IsBsp) {
1671 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1672 }
1673 } else {
1674 //
1675 // Platform hook fails to determine, use default BSP election method
1676 //
1677 InterlockedCompareExchange32 (
1678 (UINT32*)&mSmmMpSyncData->BspIndex,
1679 (UINT32)-1,
1680 (UINT32)CpuIndex
1681 );
1682 }
1683 }
1684 }
1685
1686 //
1687 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1688 //
1689 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1690
1691 //
1692 // Clear last request for SwitchBsp.
1693 //
1694 if (mSmmMpSyncData->SwitchBsp) {
1695 mSmmMpSyncData->SwitchBsp = FALSE;
1696 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1697 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1698 }
1699 }
1700
1701 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1702 SmmProfileRecordSmiNum ();
1703 }
1704
1705 //
1706 // BSP Handler is always called with a ValidSmi == TRUE
1707 //
1708 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1709 } else {
1710 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1711 }
1712 }
1713
1714 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1715
1716 //
1717 // Wait for BSP's signal to exit SMI
1718 //
1719 while (*mSmmMpSyncData->AllCpusInSync) {
1720 CpuPause ();
1721 }
1722 }
1723
1724 Exit:
1725 SmmCpuFeaturesRendezvousExit (CpuIndex);
1726
1727 //
1728 // Restore Cr2
1729 //
1730 RestoreCr2 (Cr2);
1731 }
1732
1733 /**
1734 Allocate buffer for SpinLock and Wrapper function buffer.
1735
1736 **/
1737 VOID
1738 InitializeDataForMmMp (
1739 VOID
1740 )
1741 {
1742 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1743 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1744
1745 InitializeListHead (&gSmmCpuPrivate->TokenList);
1746
1747 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1748 }
1749
1750 /**
1751 Allocate buffer for all semaphores and spin locks.
1752
1753 **/
1754 VOID
1755 InitializeSmmCpuSemaphores (
1756 VOID
1757 )
1758 {
1759 UINTN ProcessorCount;
1760 UINTN TotalSize;
1761 UINTN GlobalSemaphoresSize;
1762 UINTN CpuSemaphoresSize;
1763 UINTN SemaphoreSize;
1764 UINTN Pages;
1765 UINTN *SemaphoreBlock;
1766 UINTN SemaphoreAddr;
1767
1768 SemaphoreSize = GetSpinLockProperties ();
1769 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1770 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1771 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1772 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1773 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1774 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1775 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1776 SemaphoreBlock = AllocatePages (Pages);
1777 ASSERT (SemaphoreBlock != NULL);
1778 ZeroMem (SemaphoreBlock, TotalSize);
1779
1780 SemaphoreAddr = (UINTN)SemaphoreBlock;
1781 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1782 SemaphoreAddr += SemaphoreSize;
1783 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1784 SemaphoreAddr += SemaphoreSize;
1785 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1786 SemaphoreAddr += SemaphoreSize;
1787 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1788 SemaphoreAddr += SemaphoreSize;
1789 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1790 = (SPIN_LOCK *)SemaphoreAddr;
1791 SemaphoreAddr += SemaphoreSize;
1792
1793 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1794 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1795 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1796 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1797 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1798 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1799
1800 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1801 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1802
1803 mSemaphoreSize = SemaphoreSize;
1804 }
1805
1806 /**
1807 Initialize un-cacheable data.
1808
1809 **/
1810 VOID
1811 EFIAPI
1812 InitializeMpSyncData (
1813 VOID
1814 )
1815 {
1816 UINTN CpuIndex;
1817
1818 if (mSmmMpSyncData != NULL) {
1819 //
1820 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1821 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1822 //
1823 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1824 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1825 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1826 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1827 //
1828 // Enable BSP election by setting BspIndex to -1
1829 //
1830 mSmmMpSyncData->BspIndex = (UINT32)-1;
1831 }
1832 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1833
1834 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1835 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1836 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1837 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1838 mSmmMpSyncData->AllCpusInSync != NULL);
1839 *mSmmMpSyncData->Counter = 0;
1840 *mSmmMpSyncData->InsideSmm = FALSE;
1841 *mSmmMpSyncData->AllCpusInSync = FALSE;
1842
1843 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1844 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1845 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1846 mSmmMpSyncData->CpuData[CpuIndex].Run =
1847 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1848 mSmmMpSyncData->CpuData[CpuIndex].Present =
1849 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1850 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1851 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1852 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1853 }
1854 }
1855 }
1856
1857 /**
1858 Initialize global data for MP synchronization.
1859
1860 @param Stacks Base address of SMI stack buffer for all processors.
1861 @param StackSize Stack size for each processor in SMM.
1862 @param ShadowStackSize Shadow Stack size for each processor in SMM.
1863
1864 **/
1865 UINT32
1866 InitializeMpServiceData (
1867 IN VOID *Stacks,
1868 IN UINTN StackSize,
1869 IN UINTN ShadowStackSize
1870 )
1871 {
1872 UINT32 Cr3;
1873 UINTN Index;
1874 UINT8 *GdtTssTables;
1875 UINTN GdtTableStepSize;
1876 CPUID_VERSION_INFO_EDX RegEdx;
1877
1878 //
1879 // Determine if this CPU supports machine check
1880 //
1881 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1882 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1883
1884 //
1885 // Allocate memory for all locks and semaphores
1886 //
1887 InitializeSmmCpuSemaphores ();
1888
1889 //
1890 // Initialize mSmmMpSyncData
1891 //
1892 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1893 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1894 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1895 ASSERT (mSmmMpSyncData != NULL);
1896 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1897 InitializeMpSyncData ();
1898
1899 //
1900 // Initialize physical address mask
1901 // NOTE: Physical memory above virtual address limit is not supported !!!
1902 //
1903 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1904 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1905 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1906
1907 //
1908 // Create page tables
1909 //
1910 Cr3 = SmmInitPageTable ();
1911
1912 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1913
1914 //
1915 // Install SMI handler for each CPU
1916 //
1917 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1918 InstallSmiHandler (
1919 Index,
1920 (UINT32)mCpuHotPlugData.SmBase[Index],
1921 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
1922 StackSize,
1923 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1924 gcSmiGdtr.Limit + 1,
1925 gcSmiIdtr.Base,
1926 gcSmiIdtr.Limit + 1,
1927 Cr3
1928 );
1929 }
1930
1931 //
1932 // Record current MTRR settings
1933 //
1934 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1935 MtrrGetAllMtrrs (&gSmiMtrrs);
1936
1937 return Cr3;
1938 }
1939
1940 /**
1941
1942 Register the SMM Foundation entry point.
1943
1944 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1945 @param SmmEntryPoint SMM Foundation EntryPoint
1946
1947 @retval EFI_SUCCESS Successfully to register SMM foundation entry point
1948
1949 **/
1950 EFI_STATUS
1951 EFIAPI
1952 RegisterSmmEntry (
1953 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1954 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1955 )
1956 {
1957 //
1958 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1959 //
1960 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1961 return EFI_SUCCESS;
1962 }
1963
1964 /**
1965
1966 Register the SMM Foundation entry point.
1967
1968 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
1969 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
1970 with the related definitions of
1971 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
1972 If caller may pass a value of NULL to deregister any existing
1973 startup procedure.
1974 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
1975 run by the AP. It is an optional common mailbox between APs and
1976 the caller to share information
1977
1978 @retval EFI_SUCCESS The Procedure has been set successfully.
1979 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
1980
1981 **/
1982 EFI_STATUS
1983 RegisterStartupProcedure (
1984 IN EFI_AP_PROCEDURE Procedure,
1985 IN OUT VOID *ProcedureArguments OPTIONAL
1986 )
1987 {
1988 if (Procedure == NULL && ProcedureArguments != NULL) {
1989 return EFI_INVALID_PARAMETER;
1990 }
1991 if (mSmmMpSyncData == NULL) {
1992 return EFI_NOT_READY;
1993 }
1994
1995 mSmmMpSyncData->StartupProcedure = Procedure;
1996 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
1997
1998 return EFI_SUCCESS;
1999 }