1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12
13 //
14 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
15 //
16 MTRR_SETTINGS gSmiMtrrs;
17 UINT64 gPhyMask;
18 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
19 UINTN mSmmMpSyncDataSize;
20 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
21 UINTN mSemaphoreSize;
22 SPIN_LOCK *mPFLock = NULL;
23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
24 BOOLEAN mMachineCheckSupported = FALSE;
25
26 /**
27 Performs an atomic compare exchange operation to get the semaphore.
28 The compare exchange operation must be performed using
29 MP safe mechanisms.
30
31 @param Sem IN: 32-bit unsigned integer
32 OUT: original integer - 1
33 @return Original integer - 1
34
35 **/
36 UINT32
37 WaitForSemaphore (
38 IN OUT volatile UINT32 *Sem
39 )
40 {
41 UINT32 Value;
42
43 do {
44 Value = *Sem;
45 } while (Value == 0 ||
46 InterlockedCompareExchange32 (
47 (UINT32*)Sem,
48 Value,
49 Value - 1
50 ) != Value);
51 return Value - 1;
52 }
53
54
55 /**
56 Performs an atomic compare exchange operation to release the semaphore.
57 The compare exchange operation must be performed using
58 MP safe mechanisms.
59
60 @param Sem IN: 32-bit unsigned integer
61 OUT: original integer + 1
62 @return Original integer + 1
63
64 **/
65 UINT32
66 ReleaseSemaphore (
67 IN OUT volatile UINT32 *Sem
68 )
69 {
70 UINT32 Value;
71
72 do {
73 Value = *Sem;
74 } while (Value + 1 != 0 &&
75 InterlockedCompareExchange32 (
76 (UINT32*)Sem,
77 Value,
78 Value + 1
79 ) != Value);
80 return Value + 1;
81 }
82
83 /**
84 Performs an atomic compare exchange operation to lock the semaphore.
85 The compare exchange operation must be performed using
86 MP safe mechanisms.
87
88 @param Sem IN: 32-bit unsigned integer
89 OUT: -1
90 @return Original integer
91
92 **/
93 UINT32
94 LockdownSemaphore (
95 IN OUT volatile UINT32 *Sem
96 )
97 {
98 UINT32 Value;
99
100 do {
101 Value = *Sem;
102 } while (InterlockedCompareExchange32 (
103 (UINT32*)Sem,
104 Value, (UINT32)-1
105 ) != Value);
106 return Value;
107 }
108
109 /**
110 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
111
112 @param NumberOfAPs      Number of APs to wait for
113
114 **/
115 VOID
116 WaitForAllAPs (
117 IN UINTN NumberOfAPs
118 )
119 {
120 UINTN BspIndex;
121
122 BspIndex = mSmmMpSyncData->BspIndex;
123 while (NumberOfAPs-- > 0) {
124 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
125 }
126 }
127
128 /**
129 Performs an atomic compare exchange operation to release the semaphore
130 for each AP.
131
132 **/
133 VOID
134 ReleaseAllAPs (
135 VOID
136 )
137 {
138 UINTN Index;
139 UINTN BspIndex;
140
141 BspIndex = mSmmMpSyncData->BspIndex;
142 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
143 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
144 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
145 }
146 }
147 }
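//
// Illustrative note (not part of the original driver): the BSP/AP handshake built on
// the primitives above pairs one ReleaseAllAPs()/ReleaseSemaphore() with one
// WaitForAllAPs()/WaitForSemaphore() on the opposite side, roughly:
//
//   BSP                                  AP (one per CPU)
//   ReleaseAllAPs ();                    WaitForSemaphore (CpuData[CpuIndex].Run);
//   ...                                  ... perform the signaled step ...
//   WaitForAllAPs (ApCount);             ReleaseSemaphore (CpuData[BspIndex].Run);
//
// so the BSP's Run semaphore counts AP completions, while each AP's Run semaphore
// carries the BSP's "go" signals. See BSPHandler()/APHandler() below for the real flow.
//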
148
149 /**
150 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
151
152 @param Exceptions CPU Arrival exception flags.
153
154 @retval TRUE if all CPUs have checked in.
155 @retval FALSE if at least one Normal AP hasn't checked in.
156
157 **/
158 BOOLEAN
159 AllCpusInSmmWithExceptions (
160 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
161 )
162 {
163 UINTN Index;
164 SMM_CPU_DATA_BLOCK *CpuData;
165 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
166
167 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
168
169 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
170 return TRUE;
171 }
172
173 CpuData = mSmmMpSyncData->CpuData;
174 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
175 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
176 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
177 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
178 continue;
179 }
180 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
181 continue;
182 }
183 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
184 continue;
185 }
186 return FALSE;
187 }
188 }
189
190
191 return TRUE;
192 }
193
194 /**
195 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
196
197 @retval TRUE   The OS has enabled LMCE.
198 @retval FALSE  The OS has not enabled LMCE.
199
200 **/
201 BOOLEAN
202 IsLmceOsEnabled (
203 VOID
204 )
205 {
206 MSR_IA32_MCG_CAP_REGISTER McgCap;
207 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
208 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
209
210 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
211 if (McgCap.Bits.MCG_LMCE_P == 0) {
212 return FALSE;
213 }
214
215 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
216 if (FeatureCtrl.Bits.LmceOn == 0) {
217 return FALSE;
218 }
219
220 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
221 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
222 }
223
224 /**
225 Return whether a local machine check exception (LMCE) was signaled.
226
227 LMCE_S (when set) indicates that the current machine-check event was
228 delivered to only this logical processor.
229
230 @retval TRUE LMCE was signaled.
231 @retval FALSE LMCE was not signaled.
232
233 **/
234 BOOLEAN
235 IsLmceSignaled (
236 VOID
237 )
238 {
239 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
240
241 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
242 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
243 }
244
245 /**
246 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
247 entering SMM, except SMI-disabled APs.
248
249 **/
250 VOID
251 SmmWaitForApArrival (
252 VOID
253 )
254 {
255 UINT64 Timer;
256 UINTN Index;
257 BOOLEAN LmceEn;
258 BOOLEAN LmceSignal;
259
260 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
261
262 LmceEn = FALSE;
263 LmceSignal = FALSE;
264 if (mMachineCheckSupported) {
265 LmceEn = IsLmceOsEnabled ();
266 LmceSignal = IsLmceSignaled();
267 }
268
269 //
270 // Platform implementors should choose a timeout value appropriately:
271 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
272 //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
273 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
274 //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
275 //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
276 //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
277 // - The timeout value must be longer than the longest possible I/O operation in the system.
278 //
279
280 //
281 // Sync with APs 1st timeout
282 //
283 for (Timer = StartSyncTimer ();
284 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
285 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
286 ) {
287 CpuPause ();
288 }
289
290 //
291 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
292 // because:
293 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
294 //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
295 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
296 // work while SMI handling is on-going.
297 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
298 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
299 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
300 // mode work while SMI handling is on-going.
301 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
302 // - In traditional flow, SMI disabling is discouraged.
303 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
304 // In both cases, adding SMI-disabling checking code increases overhead.
305 //
306 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
307 //
308 // Send SMI IPIs to bring outside processors in
309 //
310 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
311 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
312 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
313 }
314 }
315
316 //
317 // Sync with APs 2nd timeout.
318 //
319 for (Timer = StartSyncTimer ();
320 !IsSyncTimerTimeout (Timer) &&
321 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
322 ) {
323 CpuPause ();
324 }
325 }
326
327 return;
328 }
329
330
331 /**
332 Replace OS MTRR's with SMI MTRR's.
333
334 @param CpuIndex Processor Index
335
336 **/
337 VOID
338 ReplaceOSMtrrs (
339 IN UINTN CpuIndex
340 )
341 {
342 SmmCpuFeaturesDisableSmrr ();
343
344 //
345 // Replace all MTRRs registers
346 //
347 MtrrSetAllMtrrs (&gSmiMtrrs);
348 }
349
350 /**
351 SMI handler for BSP.
352
353 @param CpuIndex BSP processor Index
354 @param SyncMode SMM MP sync mode
355
356 **/
357 VOID
358 BSPHandler (
359 IN UINTN CpuIndex,
360 IN SMM_CPU_SYNC_MODE SyncMode
361 )
362 {
363 UINTN Index;
364 MTRR_SETTINGS Mtrrs;
365 UINTN ApCount;
366 BOOLEAN ClearTopLevelSmiResult;
367 UINTN PresentCount;
368
369 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
370 ApCount = 0;
371
372 //
373 // Flag BSP's presence
374 //
375 *mSmmMpSyncData->InsideSmm = TRUE;
376
377 //
378 // Initialize Debug Agent to start source level debug in BSP handler
379 //
380 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
381
382 //
383 // Mark this processor's presence
384 //
385 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
386
387 //
388 // Clear platform top level SMI status bit before calling SMI handlers. If
389 // we cleared it after SMI handlers are run, we would miss the SMI that
390 // occurs after SMI handlers are done and before SMI status bit is cleared.
391 //
392 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
393 ASSERT (ClearTopLevelSmiResult == TRUE);
394
395 //
396 // Set running processor index
397 //
398 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
399
400 //
401 // If in Traditional Sync Mode, or if MTRRs need to be configured: gather all available APs.
402 //
403 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
404
405 //
406 // Wait for APs to arrive
407 //
408 SmmWaitForApArrival();
409
410 //
411 // Lock the counter down and retrieve the number of APs
412 //
413 *mSmmMpSyncData->AllCpusInSync = TRUE;
414 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
415
416 //
417 // Wait for all APs to get ready for programming MTRRs
418 //
419 WaitForAllAPs (ApCount);
420
421 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
422 //
423 // Signal all APs it's time for backup MTRRs
424 //
425 ReleaseAllAPs ();
426
427 //
428 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
429 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
430 // to a large enough value to avoid this situation.
431 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
432 // We do the backup first and then set MTRR to avoid race condition for threads
433 // in the same core.
434 //
435 MtrrGetAllMtrrs(&Mtrrs);
436
437 //
438 // Wait for all APs to complete their MTRR saving
439 //
440 WaitForAllAPs (ApCount);
441
442 //
443 // Let all processors program SMM MTRRs together
444 //
445 ReleaseAllAPs ();
446
447 //
448 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
449 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
450 // to a large enough value to avoid this situation.
451 //
452 ReplaceOSMtrrs (CpuIndex);
453
454 //
455 // Wait for all APs to complete their MTRR programming
456 //
457 WaitForAllAPs (ApCount);
458 }
459 }
460
461 //
462 // The BUSY lock is initialized to Acquired state
463 //
464 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
465
466 //
467 // Perform the pre tasks
468 //
469 PerformPreTasks ();
470
471 //
472 // Invoke SMM Foundation EntryPoint with the processor information context.
473 //
474 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
475
476 //
477 // Make sure all APs have completed their pending non-blocking tasks
478 //
479 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
480 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
481 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
482 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
483 }
484 }
485
486 //
487 // Perform the remaining tasks
488 //
489 PerformRemainingTasks ();
490
491 //
492 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
493 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
494 // will run through freely.
495 //
496 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
497
498 //
499 // Lock the counter down and retrieve the number of APs
500 //
501 *mSmmMpSyncData->AllCpusInSync = TRUE;
502 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
503 //
504 // Make sure all APs have their Present flag set
505 //
506 while (TRUE) {
507 PresentCount = 0;
508 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
509 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
510 PresentCount ++;
511 }
512 }
513 if (PresentCount > ApCount) {
514 break;
515 }
516 }
517 }
518
519 //
520 // Notify all APs to exit
521 //
522 *mSmmMpSyncData->InsideSmm = FALSE;
523 ReleaseAllAPs ();
524
525 //
526 // Wait for all APs to complete their pending tasks
527 //
528 WaitForAllAPs (ApCount);
529
530 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
531 //
532 // Signal APs to restore MTRRs
533 //
534 ReleaseAllAPs ();
535
536 //
537 // Restore OS MTRRs
538 //
539 SmmCpuFeaturesReenableSmrr ();
540 MtrrSetAllMtrrs(&Mtrrs);
541
542 //
543 // Wait for all APs to complete MTRR programming
544 //
545 WaitForAllAPs (ApCount);
546 }
547
548 //
549 // Stop source level debug in BSP handler, the code below will not be
550 // debugged.
551 //
552 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
553
554 //
555 // Signal APs to Reset states/semaphore for this processor
556 //
557 ReleaseAllAPs ();
558
559 //
560 // Perform pending operations for hot-plug
561 //
562 SmmCpuUpdate ();
563
564 //
565 // Clear the Present flag of BSP
566 //
567 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
568
569 //
570 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
571 // WaitForAllAps does not depend on the Present flag.
572 //
573 WaitForAllAPs (ApCount);
574
575 //
576 // Reset BspIndex to -1, meaning BSP has not been elected.
577 //
578 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
579 mSmmMpSyncData->BspIndex = (UINT32)-1;
580 }
581
582 //
583 // Allow APs to check in from this point on
584 //
585 *mSmmMpSyncData->Counter = 0;
586 *mSmmMpSyncData->AllCpusInSync = FALSE;
587 }
588
589 /**
590 SMI handler for AP.
591
592 @param CpuIndex AP processor Index.
593 @param ValidSmi Indicates that current SMI is a valid SMI or not.
594 @param SyncMode SMM MP sync mode.
595
596 **/
597 VOID
598 APHandler (
599 IN UINTN CpuIndex,
600 IN BOOLEAN ValidSmi,
601 IN SMM_CPU_SYNC_MODE SyncMode
602 )
603 {
604 UINT64 Timer;
605 UINTN BspIndex;
606 MTRR_SETTINGS Mtrrs;
607
608 //
609 // Wait for the BSP to enter SMM, with timeout
610 //
611 for (Timer = StartSyncTimer ();
612 !IsSyncTimerTimeout (Timer) &&
613 !(*mSmmMpSyncData->InsideSmm);
614 ) {
615 CpuPause ();
616 }
617
618 if (!(*mSmmMpSyncData->InsideSmm)) {
619 //
620 // BSP timeout in the first round
621 //
622 if (mSmmMpSyncData->BspIndex != -1) {
623 //
624 // BSP Index is known
625 //
626 BspIndex = mSmmMpSyncData->BspIndex;
627 ASSERT (CpuIndex != BspIndex);
628
629 //
630 // Send SMI IPI to bring BSP in
631 //
632 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
633
634 //
635 // Now wait for the BSP a 2nd time
636 //
637 for (Timer = StartSyncTimer ();
638 !IsSyncTimerTimeout (Timer) &&
639 !(*mSmmMpSyncData->InsideSmm);
640 ) {
641 CpuPause ();
642 }
643
644 if (!(*mSmmMpSyncData->InsideSmm)) {
645 //
646 // Give up since BSP is unable to enter SMM
647 // and signal the completion of this AP
648 WaitForSemaphore (mSmmMpSyncData->Counter);
649 return;
650 }
651 } else {
652 //
653 // Don't know BSP index. Give up without sending IPI to BSP.
654 //
655 WaitForSemaphore (mSmmMpSyncData->Counter);
656 return;
657 }
658 }
659
660 //
661 // BSP is available
662 //
663 BspIndex = mSmmMpSyncData->BspIndex;
664 ASSERT (CpuIndex != BspIndex);
665
666 //
667 // Mark this processor's presence
668 //
669 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
670
671 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
672 //
673 // Notify BSP of arrival at this point
674 //
675 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
676 }
677
678 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
679 //
680 // Wait for the signal from BSP to backup MTRRs
681 //
682 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
683
684 //
685 // Backup OS MTRRs
686 //
687 MtrrGetAllMtrrs(&Mtrrs);
688
689 //
690 // Signal BSP the completion of this AP
691 //
692 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
693
694 //
695 // Wait for BSP's signal to program MTRRs
696 //
697 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
698
699 //
700 // Replace OS MTRRs with SMI MTRRs
701 //
702 ReplaceOSMtrrs (CpuIndex);
703
704 //
705 // Signal BSP the completion of this AP
706 //
707 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
708 }
709
710 while (TRUE) {
711 //
712 // Wait for something to happen
713 //
714 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
715
716 //
717 // Check if BSP wants to exit SMM
718 //
719 if (!(*mSmmMpSyncData->InsideSmm)) {
720 break;
721 }
722
723 //
724 // BUSY should be acquired by SmmStartupThisAp()
725 //
726 ASSERT (
727 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
728 );
729
730 //
731 // Invoke the scheduled procedure
732 //
733 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
734 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
735 );
736
737 //
738 // Release BUSY
739 //
740 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
741 }
742
743 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
744 //
745 // Notify BSP the readiness of this AP to program MTRRs
746 //
747 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
748
749 //
750 // Wait for the signal from BSP to program MTRRs
751 //
752 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
753
754 //
755 // Restore OS MTRRs
756 //
757 SmmCpuFeaturesReenableSmrr ();
758 MtrrSetAllMtrrs(&Mtrrs);
759 }
760
761 //
762 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
763 //
764 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
765
766 //
767 // Wait for the signal from BSP to Reset states/semaphore for this processor
768 //
769 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
770
771 //
772 // Reset states/semaphore for this processor
773 //
774 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
775
776 //
777 // Notify BSP the readiness of this AP to exit SMM
778 //
779 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
780
781 }
782
783 /**
784 Create 4G PageTable in SMRAM.
785
786 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
787 @return PageTable Address
788
789 **/
790 UINT32
791 Gen4GPageTable (
792 IN BOOLEAN Is32BitPageTable
793 )
794 {
795 VOID *PageTable;
796 UINTN Index;
797 UINT64 *Pte;
798 UINTN PagesNeeded;
799 UINTN Low2MBoundary;
800 UINTN High2MBoundary;
801 UINTN Pages;
802 UINTN GuardPage;
803 UINT64 *Pdpte;
804 UINTN PageIndex;
805 UINTN PageAddress;
806
807 Low2MBoundary = 0;
808 High2MBoundary = 0;
809 PagesNeeded = 0;
810 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
811 //
812 // Add one more page for known good stack, then find the lower 2MB aligned address.
813 //
814 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
815 //
816 // Add two more pages for known good stack and stack guard page,
817 // then find the lower 2MB aligned address.
818 //
819 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
820 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
821 }
822 //
823 // Allocate the page table
824 //
825 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
826 ASSERT (PageTable != NULL);
827
828 PageTable = (VOID *)((UINTN)PageTable);
829 Pte = (UINT64*)PageTable;
830
831 //
832 // Zero out all page table entries first
833 //
834 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
835
836 //
837 // Set Page Directory Pointers
838 //
839 for (Index = 0; Index < 4; Index++) {
840 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
841 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
842 }
843 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
844
845 //
846 // Fill in Page Directory Entries
847 //
848 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
849 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
850 }
851
852 Pdpte = (UINT64*)PageTable;
853 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
854 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
855 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
856 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
857 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
858 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
859 //
860 // Fill in Page Table Entries
861 //
862 Pte = (UINT64*)Pages;
863 PageAddress = PageIndex;
864 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
865 if (PageAddress == GuardPage) {
866 //
867 // Mark the guard page as non-present
868 //
869 Pte[Index] = PageAddress | mAddressEncMask;
870 GuardPage += mSmmStackSize;
871 if (GuardPage > mSmmStackArrayEnd) {
872 GuardPage = 0;
873 }
874 } else {
875 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
876 }
877 PageAddress+= EFI_PAGE_SIZE;
878 }
879 Pages += EFI_PAGE_SIZE;
880 }
881 }
882
883 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
884 Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
885 if ((Pte[0] & IA32_PG_PS) == 0) {
886 // 4K-page entries are already mapped. Just hide the first one anyway.
887 Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
888 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
889 } else {
890 // Create 4K-page entries
891 Pages = (UINTN)AllocatePageTableMemory (1);
892 ASSERT (Pages != 0);
893
894 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
895
896 Pte = (UINT64*)Pages;
897 PageAddress = 0;
898 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (Present bit clear); the rest stay present
899 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
900 PageAddress += EFI_PAGE_SIZE;
901 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
902 }
903 }
904 }
905
906 return (UINT32)(UINTN)PageTable;
907 }
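//
// Illustrative note (not part of the original driver): for the PAE layout built above,
// a 32-bit linear address selects its paging-structure entries as in this hedged sketch:
//
//   UINT32  Address    = 0x00201000;                         // example address
//   UINTN   PdpteIndex = BitFieldRead32 (Address, 30, 31);   // bits 31:30 -> 0
//   UINTN   PdeIndex   = BitFieldRead32 (Address, 21, 29);   // bits 29:21 -> 1
//   UINTN   PteIndex   = BitFieldRead32 (Address, 12, 20);   // bits 20:12 -> 1
//
// The stack-guard loop above uses the first two index computations to locate the 2MB
// region to split, then walks the new 4KB page table linearly to mark guard pages
// not-present.
//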
908
909 /**
910 Schedule a procedure to run on the specified CPU.
911
912 @param[in] Procedure The address of the procedure to run
913 @param[in] CpuIndex Target CPU Index
914 @param[in, out] ProcArguments The parameter to pass to the procedure
915 @param[in] BlockingMode Startup AP in blocking mode or not
916
917 @retval EFI_INVALID_PARAMETER CpuIndex not valid
918 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
919 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
920 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
921 @retval EFI_SUCCESS The procedure has been successfully scheduled
922
923 **/
924 EFI_STATUS
925 InternalSmmStartupThisAp (
926 IN EFI_AP_PROCEDURE Procedure,
927 IN UINTN CpuIndex,
928 IN OUT VOID *ProcArguments OPTIONAL,
929 IN BOOLEAN BlockingMode
930 )
931 {
932 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
933 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
934 return EFI_INVALID_PARAMETER;
935 }
936 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
937 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
938 return EFI_INVALID_PARAMETER;
939 }
940 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
941 return EFI_INVALID_PARAMETER;
942 }
943 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
944 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
945 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
946 }
947 return EFI_INVALID_PARAMETER;
948 }
949 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
950 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
951 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
952 }
953 return EFI_INVALID_PARAMETER;
954 }
955
956 if (BlockingMode) {
957 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
958 } else {
959 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
960 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
961 return EFI_INVALID_PARAMETER;
962 }
963 }
964
965 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
966 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
967 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
968
969 if (BlockingMode) {
970 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
971 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
972 }
973 return EFI_SUCCESS;
974 }
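//
// Illustrative usage sketch (not part of the original driver). Assuming a caller-defined
// ExampleApProcedure matching EFI_AP_PROCEDURE and a hypothetical UINT32 context, an SMI
// handler running on the BSP could schedule work on AP 'CpuIndex' like this:
//
//   EFI_STATUS  Status;
//   UINT32      Context = 0;
//
//   Status = SmmBlockingStartupThisAp (ExampleApProcedure, CpuIndex, &Context);
//
// The blocking variant returns only after the AP has run the procedure and released its
// Busy lock; the non-blocking SmmStartupThisAp() returns once the work is scheduled, and
// the caller must not reuse the context until the AP releases the Busy lock.
//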
975
976 /**
977 Schedule a procedure to run on the specified CPU in blocking mode.
978
979 @param[in] Procedure The address of the procedure to run
980 @param[in] CpuIndex Target CPU Index
981 @param[in, out] ProcArguments The parameter to pass to the procedure
982
983 @retval EFI_INVALID_PARAMETER CpuIndex not valid
984 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
985 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
986 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
987 @retval EFI_SUCCESS The procedure has been successfully scheduled
988
989 **/
990 EFI_STATUS
991 EFIAPI
992 SmmBlockingStartupThisAp (
993 IN EFI_AP_PROCEDURE Procedure,
994 IN UINTN CpuIndex,
995 IN OUT VOID *ProcArguments OPTIONAL
996 )
997 {
998 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
999 }
1000
1001 /**
1002 Schedule a procedure to run on the specified CPU.
1003
1004 @param Procedure The address of the procedure to run
1005 @param CpuIndex Target CPU Index
1006 @param ProcArguments The parameter to pass to the procedure
1007
1008 @retval EFI_INVALID_PARAMETER CpuIndex not valid
1009 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
1010 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
1011 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
1012 @retval EFI_SUCCESS The procedure has been successfully scheduled
1013
1014 **/
1015 EFI_STATUS
1016 EFIAPI
1017 SmmStartupThisAp (
1018 IN EFI_AP_PROCEDURE Procedure,
1019 IN UINTN CpuIndex,
1020 IN OUT VOID *ProcArguments OPTIONAL
1021 )
1022 {
1023 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
1024 }
1025
1026 /**
1027 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1028 This is useful when you want to enable hardware breakpoints in SMM without having to enter SMM mode to set them.
1029
1030 NOTE: It might not be appropriate at runtime since it might
1031 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1032
1033 @param CpuIndex CPU Index
1034
1035 **/
1036 VOID
1037 EFIAPI
1038 CpuSmmDebugEntry (
1039 IN UINTN CpuIndex
1040 )
1041 {
1042 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1043
1044 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1045 ASSERT(CpuIndex < mMaxNumberOfCpus);
1046 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1047 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1048 AsmWriteDr6 (CpuSaveState->x86._DR6);
1049 AsmWriteDr7 (CpuSaveState->x86._DR7);
1050 } else {
1051 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1052 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1053 }
1054 }
1055 }
1056
1057 /**
1058 This function restores DR6 & DR7 to SMM save state.
1059
1060 NOTE: It might not be appropriate at runtime since it might
1061 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1062
1063 @param CpuIndex CPU Index
1064
1065 **/
1066 VOID
1067 EFIAPI
1068 CpuSmmDebugExit (
1069 IN UINTN CpuIndex
1070 )
1071 {
1072 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1073
1074 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1075 ASSERT(CpuIndex < mMaxNumberOfCpus);
1076 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1077 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1078 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1079 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1080 } else {
1081 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1082 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1083 }
1084 }
1085 }
1086
1087 /**
1088 C function for SMI entry, each processor comes here upon SMI trigger.
1089
1090 @param CpuIndex CPU Index
1091
1092 **/
1093 VOID
1094 EFIAPI
1095 SmiRendezvous (
1096 IN UINTN CpuIndex
1097 )
1098 {
1099 EFI_STATUS Status;
1100 BOOLEAN ValidSmi;
1101 BOOLEAN IsBsp;
1102 BOOLEAN BspInProgress;
1103 UINTN Index;
1104 UINTN Cr2;
1105
1106 ASSERT(CpuIndex < mMaxNumberOfCpus);
1107
1108 //
1109 // Save Cr2 because a Page Fault exception in SMM may override its value
1110 // when on-demand paging is used for memory above 4GB.
1111 //
1112 Cr2 = 0;
1113 SaveCr2 (&Cr2);
1114
1115 //
1116 // Perform CPU specific entry hooks
1117 //
1118 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1119
1120 //
1121 // Determine if this is a valid SMI
1122 //
1123 ValidSmi = PlatformValidSmi();
1124
1125 //
1126 // Determine if the BSP is already in progress. Note this must be checked after
1127 // ValidSmi because BSP may clear a valid SMI source after checking in.
1128 //
1129 BspInProgress = *mSmmMpSyncData->InsideSmm;
1130
1131 if (!BspInProgress && !ValidSmi) {
1132 //
1133 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1134 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1135 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1136 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1137 // is nothing we need to do.
1138 //
1139 goto Exit;
1140 } else {
1141 //
1142 // Signal presence of this processor
1143 //
1144 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1145 //
1146 // BSP has already ended the synchronization, so QUIT!!!
1147 //
1148
1149 //
1150 // Wait for BSP's signal to finish SMI
1151 //
1152 while (*mSmmMpSyncData->AllCpusInSync) {
1153 CpuPause ();
1154 }
1155 goto Exit;
1156 } else {
1157
1158 //
1159 // The BUSY lock is initialized to Released state.
1160 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1161 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1162 // after AP's present flag is detected.
1163 //
1164 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1165 }
1166
1167 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1168 ActivateSmmProfile (CpuIndex);
1169 }
1170
1171 if (BspInProgress) {
1172 //
1173 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1174 // as BSP may have cleared the SMI status
1175 //
1176 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1177 } else {
1178 //
1179 // We have a valid SMI
1180 //
1181
1182 //
1183 // Elect BSP
1184 //
1185 IsBsp = FALSE;
1186 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1187 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1188 //
1189 // Call platform hook to do BSP election
1190 //
1191 Status = PlatformSmmBspElection (&IsBsp);
1192 if (EFI_SUCCESS == Status) {
1193 //
1194 // Platform hook determines successfully
1195 //
1196 if (IsBsp) {
1197 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1198 }
1199 } else {
1200 //
1201 // Platform hook fails to determine, use default BSP election method
1202 //
1203 InterlockedCompareExchange32 (
1204 (UINT32*)&mSmmMpSyncData->BspIndex,
1205 (UINT32)-1,
1206 (UINT32)CpuIndex
1207 );
1208 }
1209 }
1210 }
1211
1212 //
1213 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1214 //
1215 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1216
1217 //
1218 // Clear last request for SwitchBsp.
1219 //
1220 if (mSmmMpSyncData->SwitchBsp) {
1221 mSmmMpSyncData->SwitchBsp = FALSE;
1222 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1223 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1224 }
1225 }
1226
1227 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1228 SmmProfileRecordSmiNum ();
1229 }
1230
1231 //
1232 // BSP Handler is always called with a ValidSmi == TRUE
1233 //
1234 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1235 } else {
1236 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1237 }
1238 }
1239
1240 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1241
1242 //
1243 // Wait for BSP's signal to exit SMI
1244 //
1245 while (*mSmmMpSyncData->AllCpusInSync) {
1246 CpuPause ();
1247 }
1248 }
1249
1250 Exit:
1251 SmmCpuFeaturesRendezvousExit (CpuIndex);
1252
1253 //
1254 // Restore Cr2
1255 //
1256 RestoreCr2 (Cr2);
1257 }
1258
1259 /**
1260 Allocate buffer for all semaphores and spin locks.
1261
1262 **/
1263 VOID
1264 InitializeSmmCpuSemaphores (
1265 VOID
1266 )
1267 {
1268 UINTN ProcessorCount;
1269 UINTN TotalSize;
1270 UINTN GlobalSemaphoresSize;
1271 UINTN CpuSemaphoresSize;
1272 UINTN SemaphoreSize;
1273 UINTN Pages;
1274 UINTN *SemaphoreBlock;
1275 UINTN SemaphoreAddr;
1276
1277 SemaphoreSize = GetSpinLockProperties ();
1278 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1279 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1280 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1281 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
1282 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1283 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1284 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1285 SemaphoreBlock = AllocatePages (Pages);
1286 ASSERT (SemaphoreBlock != NULL);
1287 ZeroMem (SemaphoreBlock, TotalSize);
1288
1289 SemaphoreAddr = (UINTN)SemaphoreBlock;
1290 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1291 SemaphoreAddr += SemaphoreSize;
1292 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1293 SemaphoreAddr += SemaphoreSize;
1294 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1295 SemaphoreAddr += SemaphoreSize;
1296 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1297 SemaphoreAddr += SemaphoreSize;
1298 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1299 = (SPIN_LOCK *)SemaphoreAddr;
1300 SemaphoreAddr += SemaphoreSize;
1301
1302 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1303 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1304 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1305 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1306 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1307 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1308
1309 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1310 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1311
1312 mSemaphoreSize = SemaphoreSize;
1313 }
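//
// Illustrative note (not part of the original driver): every semaphore occupies its own
// SemaphoreSize-aligned slot (SemaphoreSize comes from GetSpinLockProperties(), commonly a
// cache line), so CPU N's Run counter used elsewhere in this file resolves to:
//
//   (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * N)
//
// Keeping each CPU's Busy/Run/Present slots on separate cache lines avoids false sharing
// when all processors spin on them during an SMI.
//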
1314
1315 /**
1316 Initialize un-cacheable data.
1317
1318 **/
1319 VOID
1320 EFIAPI
1321 InitializeMpSyncData (
1322 VOID
1323 )
1324 {
1325 UINTN CpuIndex;
1326
1327 if (mSmmMpSyncData != NULL) {
1328 //
1329 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1330 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1331 //
1332 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1333 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1334 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1335 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1336 //
1337 // Enable BSP election by setting BspIndex to -1
1338 //
1339 mSmmMpSyncData->BspIndex = (UINT32)-1;
1340 }
1341 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1342
1343 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1344 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1345 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1346 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1347 mSmmMpSyncData->AllCpusInSync != NULL);
1348 *mSmmMpSyncData->Counter = 0;
1349 *mSmmMpSyncData->InsideSmm = FALSE;
1350 *mSmmMpSyncData->AllCpusInSync = FALSE;
1351
1352 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1353 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1354 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1355 mSmmMpSyncData->CpuData[CpuIndex].Run =
1356 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1357 mSmmMpSyncData->CpuData[CpuIndex].Present =
1358 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1359 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1360 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1361 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1362 }
1363 }
1364 }
1365
1366 /**
1367 Initialize global data for MP synchronization.
1368
1369 @param Stacks Base address of SMI stack buffer for all processors.
1370 @param StackSize Stack size for each processor in SMM.
1371 @param ShadowStackSize Shadow Stack size for each processor in SMM.
1372
1373 **/
1374 UINT32
1375 InitializeMpServiceData (
1376 IN VOID *Stacks,
1377 IN UINTN StackSize,
1378 IN UINTN ShadowStackSize
1379 )
1380 {
1381 UINT32 Cr3;
1382 UINTN Index;
1383 UINT8 *GdtTssTables;
1384 UINTN GdtTableStepSize;
1385 CPUID_VERSION_INFO_EDX RegEdx;
1386
1387 //
1388 // Determine if this CPU supports machine check
1389 //
1390 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1391 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1392
1393 //
1394 // Allocate memory for all locks and semaphores
1395 //
1396 InitializeSmmCpuSemaphores ();
1397
1398 //
1399 // Initialize mSmmMpSyncData
1400 //
1401 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1402 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1403 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1404 ASSERT (mSmmMpSyncData != NULL);
1405 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1406 InitializeMpSyncData ();
1407
1408 //
1409 // Initialize physical address mask
1410 // NOTE: Physical memory above virtual address limit is not supported !!!
1411 //
1412 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1413 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1414 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1415
1416 //
1417 // Create page tables
1418 //
1419 Cr3 = SmmInitPageTable ();
1420
1421 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1422
1423 //
1424 // Install SMI handler for each CPU
1425 //
1426 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1427 InstallSmiHandler (
1428 Index,
1429 (UINT32)mCpuHotPlugData.SmBase[Index],
1430 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
1431 StackSize,
1432 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1433 gcSmiGdtr.Limit + 1,
1434 gcSmiIdtr.Base,
1435 gcSmiIdtr.Limit + 1,
1436 Cr3
1437 );
1438 }
1439
1440 //
1441 // Record current MTRR settings
1442 //
1443 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1444 MtrrGetAllMtrrs (&gSmiMtrrs);
1445
1446 return Cr3;
1447 }
1448
1449 /**
1450
1451 Register the SMM Foundation entry point.
1452
1453 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1454 @param SmmEntryPoint SMM Foundation EntryPoint
1455
1456 @retval EFI_SUCCESS Successfully registered the SMM Foundation entry point
1457
1458 **/
1459 EFI_STATUS
1460 EFIAPI
1461 RegisterSmmEntry (
1462 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1463 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1464 )
1465 {
1466 //
1467 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1468 //
1469 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1470 return EFI_SUCCESS;
1471 }