UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
21 //
22 MTRR_SETTINGS gSmiMtrrs;
23 UINT64 gPhyMask;
24 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
25 UINTN mSmmMpSyncDataSize;
26 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
27 UINTN mSemaphoreSize;
28 SPIN_LOCK *mPFLock = NULL;
29 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
30 BOOLEAN mMachineCheckSupported = FALSE;
31
32 /**
33 Performs an atomic compare exchange operation to get semaphore.
34 The compare exchange operation must be performed using
35 MP safe mechanisms.
36
37 @param Sem IN: 32-bit unsigned integer
38 OUT: original integer - 1
39 @return Original integer - 1
40
41 **/
42 UINT32
43 WaitForSemaphore (
44 IN OUT volatile UINT32 *Sem
45 )
46 {
47 UINT32 Value;
48
49 do {
50 Value = *Sem;
51 } while (Value == 0 ||
52 InterlockedCompareExchange32 (
53 (UINT32*)Sem,
54 Value,
55 Value - 1
56 ) != Value);
57 return Value - 1;
58 }
59
60
61 /**
62 Performs an atomic compare exchange operation to release semaphore.
63 The compare exchange operation must be performed using
64 MP safe mechanisms.
65
66 @param Sem IN: 32-bit unsigned integer
67 OUT: original integer + 1
68 @return Original integer + 1
69
70 **/
71 UINT32
72 ReleaseSemaphore (
73 IN OUT volatile UINT32 *Sem
74 )
75 {
76 UINT32 Value;
77
78 do {
79 Value = *Sem;
80 } while (Value + 1 != 0 &&
81 InterlockedCompareExchange32 (
82 (UINT32*)Sem,
83 Value,
84 Value + 1
85 ) != Value);
86 return Value + 1;
87 }
88
89 /**
90 Performs an atomic compare exchange operation to lock semaphore.
91 The compare exchange operation must be performed using
92 MP safe mechanisms.
93
94 @param Sem IN: 32-bit unsigned integer
95 OUT: -1
96 @return Original integer
97
98 **/
99 UINT32
100 LockdownSemaphore (
101 IN OUT volatile UINT32 *Sem
102 )
103 {
104 UINT32 Value;
105
106 do {
107 Value = *Sem;
108 } while (InterlockedCompareExchange32 (
109 (UINT32*)Sem,
110 Value, (UINT32)-1
111 ) != Value);
112 return Value;
113 }
114
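//
// Usage note (a summary of how the primitives above are combined by the rendezvous code
// later in this file): every CPU checks into an SMI by calling ReleaseSemaphore() on
// mSmmMpSyncData->Counter; the BSP and APs pace each other through the per-CPU Run
// semaphores with WaitForSemaphore()/ReleaseSemaphore(); and the BSP calls
// LockdownSemaphore() on Counter, forcing it to (UINT32)-1, so that a late CPU's
// ReleaseSemaphore() returns 0 and that CPU knows this SMI run is already closed.
//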
115 /**
116 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
117
118 @param NumberOfAPs Number of APs to wait for
119
120 **/
121 VOID
122 WaitForAllAPs (
123 IN UINTN NumberOfAPs
124 )
125 {
126 UINTN BspIndex;
127
128 BspIndex = mSmmMpSyncData->BspIndex;
129 while (NumberOfAPs-- > 0) {
130 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
131 }
132 }
133
134 /**
135 Performs an atomic compare exchange operation to release semaphore
136 for each AP.
137
138 **/
139 VOID
140 ReleaseAllAPs (
141 VOID
142 )
143 {
144 UINTN Index;
145 UINTN BspIndex;
146
147 BspIndex = mSmmMpSyncData->BspIndex;
148 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
149 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
150 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
151 }
152 }
153 }
154
155 /**
156 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
157
158 @param Exceptions CPU Arrival exception flags.
159
160 @retval TRUE if all CPUs have checked in.
161 @retval FALSE if at least one Normal AP hasn't checked in.
162
163 **/
164 BOOLEAN
165 AllCpusInSmmWithExceptions (
166 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
167 )
168 {
169 UINTN Index;
170 SMM_CPU_DATA_BLOCK *CpuData;
171 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
172
173 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
174
175 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
176 return TRUE;
177 }
178
179 CpuData = mSmmMpSyncData->CpuData;
180 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
181 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
182 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
183 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
184 continue;
185 }
186 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
187 continue;
188 }
189 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
190 continue;
191 }
192 return FALSE;
193 }
194 }
195
196
197 return TRUE;
198 }
199
200 /**
201 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
202
203 @retval TRUE The OS has enabled LMCE.
204 @retval FALSE The OS has not enabled LMCE.
205
206 **/
207 BOOLEAN
208 IsLmceOsEnabled (
209 VOID
210 )
211 {
212 MSR_IA32_MCG_CAP_REGISTER McgCap;
213 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
214 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
215
216 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
217 if (McgCap.Bits.MCG_LMCE_P == 0) {
218 return FALSE;
219 }
220
221 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
222 if (FeatureCtrl.Bits.LmceOn == 0) {
223 return FALSE;
224 }
225
226 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
227 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
228 }
229
230 /**
231 Return whether a local machine check exception has been signaled.
232
233 LMCE_S (when set) indicates that a local machine check exception was generated and that the current machine-check event was
234 delivered only to the current logical processor.
235
236 @retval TRUE LMCE was signaled.
237 @retval FALSE LMCE was not signaled.
238
239 **/
240 BOOLEAN
241 IsLmceSignaled (
242 VOID
243 )
244 {
245 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
246
247 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
248 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
249 }
250
251 /**
252 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
253 entering SMM, except for SMI-disabled APs.
254
255 **/
256 VOID
257 SmmWaitForApArrival (
258 VOID
259 )
260 {
261 UINT64 Timer;
262 UINTN Index;
263 BOOLEAN LmceEn;
264 BOOLEAN LmceSignal;
265
266 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
267
268 LmceEn = FALSE;
269 LmceSignal = FALSE;
270 if (mMachineCheckSupported) {
271 LmceEn = IsLmceOsEnabled ();
272 LmceSignal = IsLmceSignaled();
273 }
274
275 //
276 // Platform implementor should choose a timeout value appropriately:
277 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
278 // that SMI handlers must ALWAYS take into account the case that not all APs are available in an SMI run.
279 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
280 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
281 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
282 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
283 // - The timeout value must be longer than the longest possible IO operation in the system.
284 //
285
286 //
287 // Sync with APs 1st timeout
288 //
289 for (Timer = StartSyncTimer ();
290 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
291 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
292 ) {
293 CpuPause ();
294 }
295
296 //
297 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
298 // because:
299 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
300 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
301 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode
302 // work while SMI handling is on-going.
303 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
304 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
305 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
306 // mode work while SMI handling is on-going.
307 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
308 // - In traditional flow, SMI disabling is discouraged.
309 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
310 // In both cases, adding SMI-disabling checking code increases overhead.
311 //
312 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
313 //
314 // Send SMI IPIs to bring outside processors in
315 //
316 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
317 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
318 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
319 }
320 }
321
322 //
323 // Sync with APs 2nd timeout.
324 //
325 for (Timer = StartSyncTimer ();
326 !IsSyncTimerTimeout (Timer) &&
327 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
328 ) {
329 CpuPause ();
330 }
331 }
332
333 return;
334 }
335
336
337 /**
338 Replace OS MTRRs with SMI MTRRs.
339
340 @param CpuIndex Processor Index
341
342 **/
343 VOID
344 ReplaceOSMtrrs (
345 IN UINTN CpuIndex
346 )
347 {
348 SmmCpuFeaturesDisableSmrr ();
349
350 //
351 // Replace all MTRR registers
352 //
353 MtrrSetAllMtrrs (&gSmiMtrrs);
354 }
355
356 /**
357 SMI handler for BSP.
358
359 @param CpuIndex BSP processor Index
360 @param SyncMode SMM MP sync mode
361
362 **/
363 VOID
364 BSPHandler (
365 IN UINTN CpuIndex,
366 IN SMM_CPU_SYNC_MODE SyncMode
367 )
368 {
369 UINTN Index;
370 MTRR_SETTINGS Mtrrs;
371 UINTN ApCount;
372 BOOLEAN ClearTopLevelSmiResult;
373 UINTN PresentCount;
374
375 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
376 ApCount = 0;
377
378 //
379 // Flag BSP's presence
380 //
381 *mSmmMpSyncData->InsideSmm = TRUE;
382
383 //
384 // Initialize Debug Agent to start source level debug in BSP handler
385 //
386 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
387
388 //
389 // Mark this processor's presence
390 //
391 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
392
393 //
394 // Clear platform top level SMI status bit before calling SMI handlers. If
395 // we cleared it after SMI handlers are run, we would miss the SMI that
396 // occurs after SMI handlers are done and before SMI status bit is cleared.
397 //
398 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
399 ASSERT (ClearTopLevelSmiResult == TRUE);
400
401 //
402 // Set running processor index
403 //
404 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
405
406 //
407 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
408 //
409 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
410
411 //
412 // Wait for APs to arrive
413 //
414 SmmWaitForApArrival();
415
416 //
417 // Lock the counter down and retrieve the number of APs
418 //
419 *mSmmMpSyncData->AllCpusInSync = TRUE;
420 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
421
422 //
423 // Wait for all APs to get ready for programming MTRRs
424 //
425 WaitForAllAPs (ApCount);
426
427 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
428 //
429 // Signal all APs it's time for backup MTRRs
430 //
431 ReleaseAllAPs ();
432
433 //
434 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
435 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
436 // to a large enough value to avoid this situation.
437 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
438 // We do the backup first and then set MTRR to avoid race condition for threads
439 // in the same core.
440 //
441 MtrrGetAllMtrrs(&Mtrrs);
442
443 //
444 // Wait for all APs to complete their MTRR saving
445 //
446 WaitForAllAPs (ApCount);
447
448 //
449 // Let all processors program SMM MTRRs together
450 //
451 ReleaseAllAPs ();
452
453 //
454 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
455 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
456 // to a large enough value to avoid this situation.
457 //
458 ReplaceOSMtrrs (CpuIndex);
459
460 //
461 // Wait for all APs to complete their MTRR programming
462 //
463 WaitForAllAPs (ApCount);
464 }
465 }
466
467 //
468 // The BUSY lock is initialized to Acquired state
469 //
470 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
471
472 //
473 // Perform the pre tasks
474 //
475 PerformPreTasks ();
476
477 //
478 // Invoke SMM Foundation EntryPoint with the processor information context.
479 //
480 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
481
482 //
483 // Make sure all APs have completed their pending non-blocking tasks
484 //
485 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
486 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
487 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
488 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
489 }
490 }
491
492 //
493 // Perform the remaining tasks
494 //
495 PerformRemainingTasks ();
496
497 //
498 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
499 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
500 // will run through freely.
501 //
502 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
503
504 //
505 // Lock the counter down and retrieve the number of APs
506 //
507 *mSmmMpSyncData->AllCpusInSync = TRUE;
508 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
509 //
510 // Make sure all APs have their Present flag set
511 //
512 while (TRUE) {
513 PresentCount = 0;
514 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
515 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
516 PresentCount ++;
517 }
518 }
519 if (PresentCount > ApCount) {
520 break;
521 }
522 }
523 }
524
525 //
526 // Notify all APs to exit
527 //
528 *mSmmMpSyncData->InsideSmm = FALSE;
529 ReleaseAllAPs ();
530
531 //
532 // Wait for all APs to complete their pending tasks
533 //
534 WaitForAllAPs (ApCount);
535
536 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
537 //
538 // Signal APs to restore MTRRs
539 //
540 ReleaseAllAPs ();
541
542 //
543 // Restore OS MTRRs
544 //
545 SmmCpuFeaturesReenableSmrr ();
546 MtrrSetAllMtrrs(&Mtrrs);
547
548 //
549 // Wait for all APs to complete MTRR programming
550 //
551 WaitForAllAPs (ApCount);
552 }
553
554 //
555 // Stop source level debug in BSP handler, the code below will not be
556 // debugged.
557 //
558 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
559
560 //
561 // Signal APs to Reset states/semaphore for this processor
562 //
563 ReleaseAllAPs ();
564
565 //
566 // Perform pending operations for hot-plug
567 //
568 SmmCpuUpdate ();
569
570 //
571 // Clear the Present flag of BSP
572 //
573 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
574
575 //
576 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
577 // WaitForAllAPs() does not depend on the Present flag.
578 //
579 WaitForAllAPs (ApCount);
580
581 //
582 // Reset BspIndex to -1, meaning BSP has not been elected.
583 //
584 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
585 mSmmMpSyncData->BspIndex = (UINT32)-1;
586 }
587
588 //
589 // Allow APs to check in from this point on
590 //
591 *mSmmMpSyncData->Counter = 0;
592 *mSmmMpSyncData->AllCpusInSync = FALSE;
593 }
594
595 /**
596 SMI handler for AP.
597
598 @param CpuIndex AP processor Index.
599 @param ValidSmi Indicates that current SMI is a valid SMI or not.
600 @param SyncMode SMM MP sync mode.
601
602 **/
603 VOID
604 APHandler (
605 IN UINTN CpuIndex,
606 IN BOOLEAN ValidSmi,
607 IN SMM_CPU_SYNC_MODE SyncMode
608 )
609 {
610 UINT64 Timer;
611 UINTN BspIndex;
612 MTRR_SETTINGS Mtrrs;
613
614 //
615 // Wait for the BSP to enter SMM (1st timeout round)
616 //
617 for (Timer = StartSyncTimer ();
618 !IsSyncTimerTimeout (Timer) &&
619 !(*mSmmMpSyncData->InsideSmm);
620 ) {
621 CpuPause ();
622 }
623
624 if (!(*mSmmMpSyncData->InsideSmm)) {
625 //
626 // BSP timeout in the first round
627 //
628 if (mSmmMpSyncData->BspIndex != -1) {
629 //
630 // BSP Index is known
631 //
632 BspIndex = mSmmMpSyncData->BspIndex;
633 ASSERT (CpuIndex != BspIndex);
634
635 //
636 // Send SMI IPI to bring BSP in
637 //
638 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
639
640 //
641 // Now wait for the BSP again (2nd timeout round)
642 //
643 for (Timer = StartSyncTimer ();
644 !IsSyncTimerTimeout (Timer) &&
645 !(*mSmmMpSyncData->InsideSmm);
646 ) {
647 CpuPause ();
648 }
649
650 if (!(*mSmmMpSyncData->InsideSmm)) {
651 //
652 // Give up since BSP is unable to enter SMM
653 // and signal the completion of this AP
654 WaitForSemaphore (mSmmMpSyncData->Counter);
655 return;
656 }
657 } else {
658 //
659 // Don't know BSP index. Give up without sending IPI to BSP.
660 //
661 WaitForSemaphore (mSmmMpSyncData->Counter);
662 return;
663 }
664 }
665
666 //
667 // BSP is available
668 //
669 BspIndex = mSmmMpSyncData->BspIndex;
670 ASSERT (CpuIndex != BspIndex);
671
672 //
673 // Mark this processor's presence
674 //
675 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
676
677 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
678 //
679 // Notify BSP of arrival at this point
680 //
681 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
682 }
683
684 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
685 //
686 // Wait for the signal from BSP to backup MTRRs
687 //
688 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
689
690 //
691 // Backup OS MTRRs
692 //
693 MtrrGetAllMtrrs(&Mtrrs);
694
695 //
696 // Signal BSP the completion of this AP
697 //
698 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
699
700 //
701 // Wait for BSP's signal to program MTRRs
702 //
703 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
704
705 //
706 // Replace OS MTRRs with SMI MTRRs
707 //
708 ReplaceOSMtrrs (CpuIndex);
709
710 //
711 // Signal BSP the completion of this AP
712 //
713 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
714 }
715
716 while (TRUE) {
717 //
718 // Wait for something to happen
719 //
720 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
721
722 //
723 // Check if BSP wants to exit SMM
724 //
725 if (!(*mSmmMpSyncData->InsideSmm)) {
726 break;
727 }
728
729 //
730 // BUSY should be acquired by SmmStartupThisAp()
731 //
732 ASSERT (
733 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
734 );
735
736 //
737 // Invoke the scheduled procedure
738 //
739 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
740 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
741 );
742
743 //
744 // Release BUSY
745 //
746 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
747 }
748
749 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
750 //
751 // Notify BSP the readiness of this AP to program MTRRs
752 //
753 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
754
755 //
756 // Wait for the signal from BSP to program MTRRs
757 //
758 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
759
760 //
761 // Restore OS MTRRs
762 //
763 SmmCpuFeaturesReenableSmrr ();
764 MtrrSetAllMtrrs(&Mtrrs);
765 }
766
767 //
768 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
769 //
770 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
771
772 //
773 // Wait for the signal from BSP to Reset states/semaphore for this processor
774 //
775 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
776
777 //
778 // Reset states/semaphore for this processor
779 //
780 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
781
782 //
783 // Notify BSP the readiness of this AP to exit SMM
784 //
785 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
786
787 }
788
789 /**
790 Create 4G PageTable in SMRAM.
791
792 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
793 @return PageTable Address
794
795 **/
796 UINT32
797 Gen4GPageTable (
798 IN BOOLEAN Is32BitPageTable
799 )
800 {
801 VOID *PageTable;
802 UINTN Index;
803 UINT64 *Pte;
804 UINTN PagesNeeded;
805 UINTN Low2MBoundary;
806 UINTN High2MBoundary;
807 UINTN Pages;
808 UINTN GuardPage;
809 UINT64 *Pdpte;
810 UINTN PageIndex;
811 UINTN PageAddress;
812
813 Low2MBoundary = 0;
814 High2MBoundary = 0;
815 PagesNeeded = 0;
816 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
817 //
818 // Add one more page for known good stack, then find the lower 2MB aligned address.
819 //
820 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
821 //
822 // Add two more pages for known good stack and stack guard page,
823 // then find the lower 2MB aligned address.
824 //
825 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
826 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
827 }
828 //
829 // Allocate the page table
830 //
831 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
832 ASSERT (PageTable != NULL);
833
834 PageTable = (VOID *)((UINTN)PageTable);
835 Pte = (UINT64*)PageTable;
836
837 //
838 // Zero out all page table entries first
839 //
840 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
841
842 //
843 // Set Page Directory Pointers
844 //
845 for (Index = 0; Index < 4; Index++) {
846 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
847 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
848 }
849 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
850
851 //
852 // Fill in Page Directory Entries
853 //
854 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
855 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
856 }
857
858 Pdpte = (UINT64*)PageTable;
859 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
860 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
861 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
862 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
863 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
864 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
865 //
866 // Fill in Page Table Entries
867 //
868 Pte = (UINT64*)Pages;
869 PageAddress = PageIndex;
870 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
871 if (PageAddress == GuardPage) {
872 //
873 // Mark the guard page as non-present
874 //
875 Pte[Index] = PageAddress | mAddressEncMask;
876 GuardPage += mSmmStackSize;
877 if (GuardPage > mSmmStackArrayEnd) {
878 GuardPage = 0;
879 }
880 } else {
881 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
882 }
883 PageAddress+= EFI_PAGE_SIZE;
884 }
885 Pages += EFI_PAGE_SIZE;
886 }
887 }
888
889 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
890 Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
891 if ((Pte[0] & IA32_PG_PS) == 0) {
892 // 4K-page entries are already mapped. Just hide the first one anyway.
893 Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
894 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
895 } else {
896 // Create 4K-page entries
897 Pages = (UINTN)AllocatePageTableMemory (1);
898 ASSERT (Pages != 0);
899
900 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
901
902 Pte = (UINT64*)Pages;
903 PageAddress = 0;
904 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 (not present); remaining entries stay present
905 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
906 PageAddress += EFI_PAGE_SIZE;
907 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
908 }
909 }
910 }
911
912 return (UINT32)(UINTN)PageTable;
913 }
914
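//
// For reference, the layout produced by Gen4GPageTable() above is:
//   page 0      : page directory pointer table (4 entries covering 0 - 4GB)
//   pages 1 - 4 : page directories, each holding 512 2MB entries, identity-mapping 0 - 4GB
//   pages 5 ... : only when PcdCpuSmmStackGuard is set, 4KB page tables that split the
//                 2MB ranges covering the SMM stacks so each stack's guard page can be
//                 marked not-present
//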
915 /**
916 Schedule a procedure to run on the specified CPU.
917
918 @param[in] Procedure The address of the procedure to run
919 @param[in] CpuIndex Target CPU Index
920 @param[in, out] ProcArguments The parameter to pass to the procedure
921 @param[in] BlockingMode Startup AP in blocking mode or not
922
923 @retval EFI_INVALID_PARAMETER CpuIndex not valid
924 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
925 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
926 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
927 @retval EFI_SUCCESS The procedure has been successfully scheduled
928
929 **/
930 EFI_STATUS
931 InternalSmmStartupThisAp (
932 IN EFI_AP_PROCEDURE Procedure,
933 IN UINTN CpuIndex,
934 IN OUT VOID *ProcArguments OPTIONAL,
935 IN BOOLEAN BlockingMode
936 )
937 {
938 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
939 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
940 return EFI_INVALID_PARAMETER;
941 }
942 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
943 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
944 return EFI_INVALID_PARAMETER;
945 }
946 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
947 return EFI_INVALID_PARAMETER;
948 }
949 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
950 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
951 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
952 }
953 return EFI_INVALID_PARAMETER;
954 }
955 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
956 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
957 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
958 }
959 return EFI_INVALID_PARAMETER;
960 }
961
962 if (BlockingMode) {
963 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
964 } else {
965 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
966 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
967 return EFI_INVALID_PARAMETER;
968 }
969 }
970
971 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
972 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
973 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
974
975 if (BlockingMode) {
976 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
977 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
978 }
979 return EFI_SUCCESS;
980 }
981
982 /**
983 Schedule a procedure to run on the specified CPU in blocking mode.
984
985 @param[in] Procedure The address of the procedure to run
986 @param[in] CpuIndex Target CPU Index
987 @param[in, out] ProcArguments The parameter to pass to the procedure
988
989 @retval EFI_INVALID_PARAMETER CpuIndex not valid
990 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
991 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
992 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
993 @retval EFI_SUCCESS The procedure has been successfully scheduled
994
995 **/
996 EFI_STATUS
997 EFIAPI
998 SmmBlockingStartupThisAp (
999 IN EFI_AP_PROCEDURE Procedure,
1000 IN UINTN CpuIndex,
1001 IN OUT VOID *ProcArguments OPTIONAL
1002 )
1003 {
1004 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
1005 }
1006
1007 /**
1008 Schedule a procedure to run on the specified CPU.
1009
1010 @param Procedure The address of the procedure to run
1011 @param CpuIndex Target CPU Index
1012 @param ProcArguments The parameter to pass to the procedure
1013
1014 @retval EFI_INVALID_PARAMETER CpuIndex not valid
1015 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
1016 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
1017 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
1018 @retval EFI_SUCCESS The procedure has been successfully scheduled
1019
1020 **/
1021 EFI_STATUS
1022 EFIAPI
1023 SmmStartupThisAp (
1024 IN EFI_AP_PROCEDURE Procedure,
1025 IN UINTN CpuIndex,
1026 IN OUT VOID *ProcArguments OPTIONAL
1027 )
1028 {
1029 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
1030 }
1031
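//
// Illustrative sketch (not part of this driver): from an SMI handler running on the
// CurrentlyExecutingCpu, work can be scheduled on another checked-in processor roughly
// as follows, where MyApProcedure and MyBuffer are hypothetical:
//
//   VOID
//   EFIAPI
//   MyApProcedure (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // ... work to run on the target AP while it sits in APHandler() ...
//   }
//
//   Status = SmmStartupThisAp (MyApProcedure, CpuIndex, &MyBuffer);
//
// Whether the call returns before or after MyApProcedure completes is controlled by
// PcdCpuSmmBlockStartupThisAp (see InternalSmmStartupThisAp() above).
//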
1032 /**
1033 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1034 This is useful when you want to enable hardware breakpoints in SMM without entering SMM mode first.
1035
1036 NOTE: It might not be appropriate at runtime since it might
1037 conflict with OS debugging facilities. Turn them off in RELEASE builds.
1038
1039 @param CpuIndex CPU Index
1040
1041 **/
1042 VOID
1043 EFIAPI
1044 CpuSmmDebugEntry (
1045 IN UINTN CpuIndex
1046 )
1047 {
1048 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1049
1050 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1051 ASSERT(CpuIndex < mMaxNumberOfCpus);
1052 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1053 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1054 AsmWriteDr6 (CpuSaveState->x86._DR6);
1055 AsmWriteDr7 (CpuSaveState->x86._DR7);
1056 } else {
1057 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1058 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1059 }
1060 }
1061 }
1062
1063 /**
1064 This function writes DR6 & DR7 back to the SMM save state.
1065
1066 NOTE: It might not be appropriate at runtime since it might
1067 conflict with OS debugging facilities. Turn them off in RELEASE builds.
1068
1069 @param CpuIndex CPU Index
1070
1071 **/
1072 VOID
1073 EFIAPI
1074 CpuSmmDebugExit (
1075 IN UINTN CpuIndex
1076 )
1077 {
1078 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1079
1080 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1081 ASSERT(CpuIndex < mMaxNumberOfCpus);
1082 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1083 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1084 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1085 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1086 } else {
1087 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1088 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1089 }
1090 }
1091 }
1092
1093 /**
1094 C function for SMI entry, each processor comes here upon SMI trigger.
1095
1096 @param CpuIndex CPU Index
1097
1098 **/
1099 VOID
1100 EFIAPI
1101 SmiRendezvous (
1102 IN UINTN CpuIndex
1103 )
1104 {
1105 EFI_STATUS Status;
1106 BOOLEAN ValidSmi;
1107 BOOLEAN IsBsp;
1108 BOOLEAN BspInProgress;
1109 UINTN Index;
1110 UINTN Cr2;
1111
1112 ASSERT(CpuIndex < mMaxNumberOfCpus);
1113
1114 //
1115 // Save Cr2 because Page Fault exception in SMM may override its value
1116 //
1117 Cr2 = AsmReadCr2 ();
1118
1119 //
1120 // Perform CPU specific entry hooks
1121 //
1122 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1123
1124 //
1125 // Determine if this is a valid SMI
1126 //
1127 ValidSmi = PlatformValidSmi();
1128
1129 //
1130 // Determine if the BSP is already in progress. Note this must be checked after
1131 // ValidSmi because BSP may clear a valid SMI source after checking in.
1132 //
1133 BspInProgress = *mSmmMpSyncData->InsideSmm;
1134
1135 if (!BspInProgress && !ValidSmi) {
1136 //
1137 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1138 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1139 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1140 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1141 // is nothing we need to do.
1142 //
1143 goto Exit;
1144 } else {
1145 //
1146 // Signal presence of this processor
1147 //
1148 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1149 //
1150 // BSP has already ended the synchronization, so QUIT!!!
1151 //
1152
1153 //
1154 // Wait for BSP's signal to finish SMI
1155 //
1156 while (*mSmmMpSyncData->AllCpusInSync) {
1157 CpuPause ();
1158 }
1159 goto Exit;
1160 } else {
1161
1162 //
1163 // The BUSY lock is initialized to Released state.
1164 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1165 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1166 // after AP's present flag is detected.
1167 //
1168 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1169 }
1170
1171 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1172 ActivateSmmProfile (CpuIndex);
1173 }
1174
1175 if (BspInProgress) {
1176 //
1177 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1178 // as BSP may have cleared the SMI status
1179 //
1180 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1181 } else {
1182 //
1183 // We have a valid SMI
1184 //
1185
1186 //
1187 // Elect BSP
1188 //
1189 IsBsp = FALSE;
1190 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1191 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1192 //
1193 // Call platform hook to do BSP election
1194 //
1195 Status = PlatformSmmBspElection (&IsBsp);
1196 if (EFI_SUCCESS == Status) {
1197 //
1198 // Platform hook determines successfully
1199 //
1200 if (IsBsp) {
1201 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1202 }
1203 } else {
1204 //
1205 // Platform hook fails to determine, use default BSP election method
1206 //
1207 InterlockedCompareExchange32 (
1208 (UINT32*)&mSmmMpSyncData->BspIndex,
1209 (UINT32)-1,
1210 (UINT32)CpuIndex
1211 );
1212 }
1213 }
1214 }
1215
1216 //
1217 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1218 //
1219 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1220
1221 //
1222 // Clear last request for SwitchBsp.
1223 //
1224 if (mSmmMpSyncData->SwitchBsp) {
1225 mSmmMpSyncData->SwitchBsp = FALSE;
1226 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1227 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1228 }
1229 }
1230
1231 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1232 SmmProfileRecordSmiNum ();
1233 }
1234
1235 //
1236 // BSP Handler is always called with a ValidSmi == TRUE
1237 //
1238 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1239 } else {
1240 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1241 }
1242 }
1243
1244 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1245
1246 //
1247 // Wait for BSP's signal to exit SMI
1248 //
1249 while (*mSmmMpSyncData->AllCpusInSync) {
1250 CpuPause ();
1251 }
1252 }
1253
1254 Exit:
1255 SmmCpuFeaturesRendezvousExit (CpuIndex);
1256 //
1257 // Restore Cr2
1258 //
1259 AsmWriteCr2 (Cr2);
1260 }
1261
1262 /**
1263 Allocate buffer for all semaphores and spin locks.
1264
1265 **/
1266 VOID
1267 InitializeSmmCpuSemaphores (
1268 VOID
1269 )
1270 {
1271 UINTN ProcessorCount;
1272 UINTN TotalSize;
1273 UINTN GlobalSemaphoresSize;
1274 UINTN CpuSemaphoresSize;
1275 UINTN MsrSemaphoreSize;
1276 UINTN SemaphoreSize;
1277 UINTN Pages;
1278 UINTN *SemaphoreBlock;
1279 UINTN SemaphoreAddr;
1280
1281 SemaphoreSize = GetSpinLockProperties ();
1282 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1283 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1284 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1285 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
1286 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
1287 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1288 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1289 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1290 SemaphoreBlock = AllocatePages (Pages);
1291 ASSERT (SemaphoreBlock != NULL);
1292 ZeroMem (SemaphoreBlock, TotalSize);
1293
1294 SemaphoreAddr = (UINTN)SemaphoreBlock;
1295 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1296 SemaphoreAddr += SemaphoreSize;
1297 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1298 SemaphoreAddr += SemaphoreSize;
1299 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1300 SemaphoreAddr += SemaphoreSize;
1301 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1302 SemaphoreAddr += SemaphoreSize;
1303 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1304 = (SPIN_LOCK *)SemaphoreAddr;
1305 SemaphoreAddr += SemaphoreSize;
1306 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
1307 = (SPIN_LOCK *)SemaphoreAddr;
1308
1309 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1310 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1311 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1312 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1313 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1314 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1315
1316 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
1317 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
1318 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
1319 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
1320 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
1321
1322 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1323 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1324 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
1325
1326 mSemaphoreSize = SemaphoreSize;
1327 }
1328
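//
// Note: each semaphore and lock above occupies its own SemaphoreSize-byte slot, where
// SemaphoreSize comes from GetSpinLockProperties() (typically one cache line), so CPUs
// spinning on different semaphores do not share a cache line.
//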
1329 /**
1330 Initialize un-cacheable data.
1331
1332 **/
1333 VOID
1334 EFIAPI
1335 InitializeMpSyncData (
1336 VOID
1337 )
1338 {
1339 UINTN CpuIndex;
1340
1341 if (mSmmMpSyncData != NULL) {
1342 //
1343 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1344 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1345 //
1346 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1347 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1348 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1349 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1350 //
1351 // Enable BSP election by setting BspIndex to -1
1352 //
1353 mSmmMpSyncData->BspIndex = (UINT32)-1;
1354 }
1355 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1356
1357 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1358 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1359 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1360 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1361 mSmmMpSyncData->AllCpusInSync != NULL);
1362 *mSmmMpSyncData->Counter = 0;
1363 *mSmmMpSyncData->InsideSmm = FALSE;
1364 *mSmmMpSyncData->AllCpusInSync = FALSE;
1365
1366 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1367 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1368 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1369 mSmmMpSyncData->CpuData[CpuIndex].Run =
1370 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1371 mSmmMpSyncData->CpuData[CpuIndex].Present =
1372 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1373 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1374 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1375 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1376 }
1377 }
1378 }
1379
1380 /**
1381 Initialize global data for MP synchronization.
1382
1383 @param Stacks Base address of SMI stack buffer for all processors.
1384 @param StackSize Stack size for each processor in SMM.
1385
1386 **/
1387 UINT32
1388 InitializeMpServiceData (
1389 IN VOID *Stacks,
1390 IN UINTN StackSize
1391 )
1392 {
1393 UINT32 Cr3;
1394 UINTN Index;
1395 UINT8 *GdtTssTables;
1396 UINTN GdtTableStepSize;
1397 CPUID_VERSION_INFO_EDX RegEdx;
1398
1399 //
1400 // Determine if this CPU supports machine check
1401 //
1402 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
1403 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
1404
1405 //
1406 // Allocate memory for all locks and semaphores
1407 //
1408 InitializeSmmCpuSemaphores ();
1409
1410 //
1411 // Initialize mSmmMpSyncData
1412 //
1413 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1414 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1415 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1416 ASSERT (mSmmMpSyncData != NULL);
1417 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1418 InitializeMpSyncData ();
1419
1420 //
1421 // Initialize physical address mask
1422 // NOTE: Physical memory above virtual address limit is not supported !!!
1423 //
1424 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1425 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1426 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
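//
// Example: a CPU reporting 36 physical address bits yields gPhyMask =
// 0xFFFFFFFFF & 0xFFFFFFFFF000 = 0xFFFFFF000, i.e. bits above the supported
// width (capped at 48) and the low 12 page-offset bits are cleared.
//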
1427
1428 //
1429 // Create page tables
1430 //
1431 Cr3 = SmmInitPageTable ();
1432
1433 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1434
1435 //
1436 // Install SMI handler for each CPU
1437 //
1438 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1439 InstallSmiHandler (
1440 Index,
1441 (UINT32)mCpuHotPlugData.SmBase[Index],
1442 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1443 StackSize,
1444 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1445 gcSmiGdtr.Limit + 1,
1446 gcSmiIdtr.Base,
1447 gcSmiIdtr.Limit + 1,
1448 Cr3
1449 );
1450 }
1451
1452 //
1453 // Record current MTRR settings
1454 //
1455 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1456 MtrrGetAllMtrrs (&gSmiMtrrs);
1457
1458 return Cr3;
1459 }
1460
1461 /**
1462
1463 Register the SMM Foundation entry point.
1464
1465 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1466 @param SmmEntryPoint SMM Foundation EntryPoint
1467
1468 @retval EFI_SUCCESS The SMM Foundation entry point was successfully registered
1469
1470 **/
1471 EFI_STATUS
1472 EFIAPI
1473 RegisterSmmEntry (
1474 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1475 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1476 )
1477 {
1478 //
1479 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1480 //
1481 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1482 return EFI_SUCCESS;
1483 }