UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
21 //
22 MTRR_SETTINGS gSmiMtrrs;
23 UINT64 gPhyMask;
24 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
25 UINTN mSmmMpSyncDataSize;
26 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
27 UINTN mSemaphoreSize;
28 SPIN_LOCK *mPFLock = NULL;
29 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
30
31 /**
32 Performs an atomic compare exchange operation to get semaphore.
33 The compare exchange operation must be performed using
34 MP safe mechanisms.
35
36 @param Sem IN: 32-bit unsigned integer
37 OUT: original integer - 1
38 @return Original integer - 1
39
40 **/
41 UINT32
42 WaitForSemaphore (
43 IN OUT volatile UINT32 *Sem
44 )
45 {
46 UINT32 Value;
47
48 do {
49 Value = *Sem;
50 } while (Value == 0 ||
51 InterlockedCompareExchange32 (
52 (UINT32*)Sem,
53 Value,
54 Value - 1
55 ) != Value);
56 return Value - 1;
57 }
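//
// Illustrative usage sketch (not part of this driver's logic): the counters
// manipulated by WaitForSemaphore()/ReleaseSemaphore() act as counting
// semaphores shared between processors. The processor indices used here are
// hypothetical.
//
//   // CPU A (consumer)                              // CPU B (producer)
//   WaitForSemaphore (mSmmMpSyncData->CpuData[A].Run);   ReleaseSemaphore (mSmmMpSyncData->CpuData[A].Run);
//
// The compare-exchange loop retries whenever the counter reads as 0 or another
// processor modified it between the read and the exchange, so the decrement
// only succeeds once a count has actually been posted.
//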
58
59
60 /**
61 Performs an atomic compare exchange operation to release semaphore.
62 The compare exchange operation must be performed using
63 MP safe mechanisms.
64
65 @param Sem IN: 32-bit unsigned integer
66 OUT: original integer + 1
67 @return Original integer + 1
68
69 **/
70 UINT32
71 ReleaseSemaphore (
72 IN OUT volatile UINT32 *Sem
73 )
74 {
75 UINT32 Value;
76
77 do {
78 Value = *Sem;
79 } while (Value + 1 != 0 &&
80 InterlockedCompareExchange32 (
81 (UINT32*)Sem,
82 Value,
83 Value + 1
84 ) != Value);
85 return Value + 1;
86 }
87
88 /**
89 Performs an atomic compare exchange operation to lock semaphore.
90 The compare exchange operation must be performed using
91 MP safe mechanisms.
92
93 @param Sem IN: 32-bit unsigned integer
94 OUT: -1
95 @return Original integer
96
97 **/
98 UINT32
99 LockdownSemaphore (
100 IN OUT volatile UINT32 *Sem
101 )
102 {
103 UINT32 Value;
104
105 do {
106 Value = *Sem;
107 } while (InterlockedCompareExchange32 (
108 (UINT32*)Sem,
109 Value, (UINT32)-1
110 ) != Value);
111 return Value;
112 }
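//
// Usage sketch (mirrors BSPHandler() below): locking the arrival counter to
// (UINT32)-1 makes further ReleaseSemaphore() calls fail (Value + 1 would wrap
// to 0), so CPUs that check in late can detect that arrival has been closed.
//
//   *mSmmMpSyncData->AllCpusInSync = TRUE;
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;  // arrived CPUs, excluding the BSP
//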
113
114 /**
115 Wait for all APs to perform an atomic compare exchange operation that releases the semaphore.
116
117 @param NumberOfAPs Number of APs to wait for.
118
119 **/
120 VOID
121 WaitForAllAPs (
122 IN UINTN NumberOfAPs
123 )
124 {
125 UINTN BspIndex;
126
127 BspIndex = mSmmMpSyncData->BspIndex;
128 while (NumberOfAPs-- > 0) {
129 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
130 }
131 }
132
133 /**
134 Performs an atomic compare exchange operation to release semaphore
135 for each AP.
136
137 **/
138 VOID
139 ReleaseAllAPs (
140 VOID
141 )
142 {
143 UINTN Index;
144 UINTN BspIndex;
145
146 BspIndex = mSmmMpSyncData->BspIndex;
147 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
148 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
149 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
150 }
151 }
152 }
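//
// Typical BSP-side barrier built from the two helpers above, as used in
// BSPHandler() below (shown here only for illustration):
//
//   ReleaseAllAPs ();         // post one count on every present AP's Run semaphore
//   WaitForAllAPs (ApCount);  // consume ApCount counts posted back on the BSP's Run semaphore
//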
153
154 /**
155 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
156
157 @param Exceptions CPU Arrival exception flags.
158
159 @retval TRUE if all CPUs have checked in.
160 @retval FALSE if at least one Normal AP hasn't checked in.
161
162 **/
163 BOOLEAN
164 AllCpusInSmmWithExceptions (
165 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
166 )
167 {
168 UINTN Index;
169 SMM_CPU_DATA_BLOCK *CpuData;
170 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
171
172 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
173
174 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
175 return TRUE;
176 }
177
178 CpuData = mSmmMpSyncData->CpuData;
179 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
180 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
181 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
182 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
183 continue;
184 }
185 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
186 continue;
187 }
188 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
189 continue;
190 }
191 return FALSE;
192 }
193 }
194
195
196 return TRUE;
197 }
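//
// Example invocation (as used by SmmWaitForApArrival() below): processors that
// report themselves as blocked or SMI-disabled are tolerated while waiting for
// the remaining CPUs to check in.
//
//   if (AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED)) {
//     // every CPU that can possibly arrive has arrived
//   }
//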
198
199 /**
200 Determine whether the OS has enabled LMCE (Local Machine Check Exception) in MSR_IA32_MCG_EXT_CTL.
201 
202 @retval TRUE The OS has enabled LMCE.
203 @retval FALSE The OS has not enabled LMCE.
204
205 **/
206 BOOLEAN
207 IsLmceOsEnabled (
208 VOID
209 )
210 {
211 MSR_IA32_MCG_CAP_REGISTER McgCap;
212 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
213 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
214
215 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
216 if (McgCap.Bits.MCG_LMCE_P == 0) {
217 return FALSE;
218 }
219
220 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
221 if (FeatureCtrl.Bits.LmceOn == 0) {
222 return FALSE;
223 }
224
225 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
226 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
227 }
228
229 /**
230 Return if Local machine check exception signaled.
231
232 Indicates (when set) that a local machine check exception was generated. This means the current machine-check event was
233 delivered only to this logical processor.
234
235 @retval TRUE LMCE was signaled.
236 @retval FALSE LMCE was not signaled.
237
238 **/
239 BOOLEAN
240 IsLmceSignaled (
241 VOID
242 )
243 {
244 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
245
246 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
247 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
248 }
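//
// Sketch of how the two LMCE checks above are combined (see
// SmmWaitForApArrival() below): when the OS has opted in to LMCE and the
// machine-check event was delivered locally, the first sync round does not
// wait for other processors.
//
//   LmceEn     = IsLmceOsEnabled ();
//   LmceSignal = IsLmceSignaled ();
//   if (LmceEn && LmceSignal) {
//     // the MCE was local to this processor; skip waiting for the others
//   }
//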
249
250 /**
251 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal-mode code before
252 entering SMM, except for SMI-disabled APs.
253
254 **/
255 VOID
256 SmmWaitForApArrival (
257 VOID
258 )
259 {
260 UINT64 Timer;
261 UINTN Index;
262 BOOLEAN LmceEn;
263 BOOLEAN LmceSignal;
264
265 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
266
267 LmceEn = IsLmceOsEnabled ();
268 LmceSignal = IsLmceSignaled();
269
270 //
271 // The platform implementor should choose the timeout value appropriately:
272 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
273 // the SMI handlers must ALWAYS account for the case that not all APs are available in an SMI run.
274 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give all APs time to receive the SMI IPI
275 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal-mode code when SMI handling starts. This holds
276 // even if a blocked CPU is brought out of the blocked state by a normal-mode CPU (before the normal-mode CPU received the
277 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
278 // - The timeout value must be longer than the longest possible I/O operation in the system.
279 //
280
281 //
282 // Sync with APs 1st timeout
283 //
284 for (Timer = StartSyncTimer ();
285 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
286 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
287 ) {
288 CpuPause ();
289 }
290
291 //
292 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
293 // because:
294 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
295 // normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the delayed / blocked state, they
296 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode
297 // work while SMI handling is ongoing.
298 // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
299 // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in the SMI-disabled state
300 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
301 // mode work while SMI handling is ongoing.
302 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
303 // - In traditional flow, SMI disabling is discouraged.
304 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
305 // In both cases, adding SMI-disabling checking code increases overhead.
306 //
307 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
308 //
309 // Send SMI IPIs to bring outside processors in
310 //
311 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
312 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
313 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
314 }
315 }
316
317 //
318 // Sync with APs 2nd timeout.
319 //
320 for (Timer = StartSyncTimer ();
321 !IsSyncTimerTimeout (Timer) &&
322 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
323 ) {
324 CpuPause ();
325 }
326 }
327
328 return;
329 }
330
331
332 /**
333 Replace OS MTRRs with SMI MTRRs.
334
335 @param CpuIndex Processor Index
336
337 **/
338 VOID
339 ReplaceOSMtrrs (
340 IN UINTN CpuIndex
341 )
342 {
343 SmmCpuFeaturesDisableSmrr ();
344
345 //
346 // Replace all MTRR registers
347 //
348 MtrrSetAllMtrrs (&gSmiMtrrs);
349 }
350
351 /**
352 SMI handler for BSP.
353
354 @param CpuIndex BSP processor Index
355 @param SyncMode SMM MP sync mode
356
357 **/
358 VOID
359 BSPHandler (
360 IN UINTN CpuIndex,
361 IN SMM_CPU_SYNC_MODE SyncMode
362 )
363 {
364 UINTN Index;
365 MTRR_SETTINGS Mtrrs;
366 UINTN ApCount;
367 BOOLEAN ClearTopLevelSmiResult;
368 UINTN PresentCount;
369
370 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
371 ApCount = 0;
372
373 //
374 // Flag BSP's presence
375 //
376 *mSmmMpSyncData->InsideSmm = TRUE;
377
378 //
379 // Initialize Debug Agent to start source level debug in BSP handler
380 //
381 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
382
383 //
384 // Mark this processor's presence
385 //
386 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
387
388 //
389 // Clear platform top level SMI status bit before calling SMI handlers. If
390 // we cleared it after SMI handlers are run, we would miss the SMI that
391 // occurs after SMI handlers are done and before SMI status bit is cleared.
392 //
393 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
394 ASSERT (ClearTopLevelSmiResult == TRUE);
395
396 //
397 // Set running processor index
398 //
399 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
400
401 //
402 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
403 //
404 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
405
406 //
407 // Wait for APs to arrive
408 //
409 SmmWaitForApArrival();
410
411 //
412 // Lock the counter down and retrieve the number of APs
413 //
414 *mSmmMpSyncData->AllCpusInSync = TRUE;
415 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
416
417 //
418 // Wait for all APs to get ready for programming MTRRs
419 //
420 WaitForAllAPs (ApCount);
421
422 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
423 //
424 // Signal all APs that it's time to back up MTRRs
425 //
426 ReleaseAllAPs ();
427
428 //
429 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
430 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
431 // to a large enough value to avoid this situation.
432 // Note: For HT-capable CPUs, threads within a core share the same set of MTRRs.
433 // We do the backup first and then set the MTRRs to avoid a race condition for threads
434 // in the same core.
435 //
436 MtrrGetAllMtrrs(&Mtrrs);
437
438 //
439 // Wait for all APs to complete their MTRR saving
440 //
441 WaitForAllAPs (ApCount);
442
443 //
444 // Let all processors program SMM MTRRs together
445 //
446 ReleaseAllAPs ();
447
448 //
449 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
450 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
451 // to a large enough value to avoid this situation.
452 //
453 ReplaceOSMtrrs (CpuIndex);
454
455 //
456 // Wait for all APs to complete their MTRR programming
457 //
458 WaitForAllAPs (ApCount);
459 }
460 }
461
462 //
463 // The BUSY lock is initialized to Acquired state
464 //
465 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
466
467 //
468 // Perform the pre tasks
469 //
470 PerformPreTasks ();
471
472 //
473 // Invoke SMM Foundation EntryPoint with the processor information context.
474 //
475 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
476
477 //
478 // Make sure all APs have completed their pending non-blocking tasks
479 //
480 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
481 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
482 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
483 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
484 }
485 }
486
487 //
488 // Perform the remaining tasks
489 //
490 PerformRemainingTasks ();
491
492 //
493 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
494 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
495 // will run through freely.
496 //
497 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
498
499 //
500 // Lock the counter down and retrieve the number of APs
501 //
502 *mSmmMpSyncData->AllCpusInSync = TRUE;
503 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
504 //
505 // Make sure all APs have their Present flag set
506 //
507 while (TRUE) {
508 PresentCount = 0;
509 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
510 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
511 PresentCount ++;
512 }
513 }
514 if (PresentCount > ApCount) {
515 break;
516 }
517 }
518 }
519
520 //
521 // Notify all APs to exit
522 //
523 *mSmmMpSyncData->InsideSmm = FALSE;
524 ReleaseAllAPs ();
525
526 //
527 // Wait for all APs to complete their pending tasks
528 //
529 WaitForAllAPs (ApCount);
530
531 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
532 //
533 // Signal APs to restore MTRRs
534 //
535 ReleaseAllAPs ();
536
537 //
538 // Restore OS MTRRs
539 //
540 SmmCpuFeaturesReenableSmrr ();
541 MtrrSetAllMtrrs(&Mtrrs);
542
543 //
544 // Wait for all APs to complete MTRR programming
545 //
546 WaitForAllAPs (ApCount);
547 }
548
549 //
550 // Stop source level debug in BSP handler, the code below will not be
551 // debugged.
552 //
553 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
554
555 //
556 // Signal APs to Reset states/semaphore for this processor
557 //
558 ReleaseAllAPs ();
559
560 //
561 // Perform pending operations for hot-plug
562 //
563 SmmCpuUpdate ();
564
565 //
566 // Clear the Present flag of BSP
567 //
568 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
569
570 //
571 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
572 // WaitForAllAPs() does not depend on the Present flag.
573 //
574 WaitForAllAPs (ApCount);
575
576 //
577 // Reset BspIndex to -1, meaning BSP has not been elected.
578 //
579 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
580 mSmmMpSyncData->BspIndex = (UINT32)-1;
581 }
582
583 //
584 // Allow APs to check in from this point on
585 //
586 *mSmmMpSyncData->Counter = 0;
587 *mSmmMpSyncData->AllCpusInSync = FALSE;
588 }
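//
// MTRR synchronization handshake at a glance, when SmmCpuFeaturesNeedConfigureMtrrs()
// returns TRUE. This is only a summary of the sequence implemented in
// BSPHandler() above and APHandler() below; it adds no new behavior:
//
//   BSP                                     each AP
//   WaitForAllAPs (ApCount)          <---   ReleaseSemaphore (BSP Run)       // arrival
//   ReleaseAllAPs ()                 --->   WaitForSemaphore (own Run)
//   MtrrGetAllMtrrs (&Mtrrs)                MtrrGetAllMtrrs (&Mtrrs)         // back up OS MTRRs
//   WaitForAllAPs (ApCount)          <---   ReleaseSemaphore (BSP Run)
//   ReleaseAllAPs ()                 --->   WaitForSemaphore (own Run)
//   ReplaceOSMtrrs (CpuIndex)               ReplaceOSMtrrs (CpuIndex)        // program SMI MTRRs
//   WaitForAllAPs (ApCount)          <---   ReleaseSemaphore (BSP Run)
//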
589
590 /**
591 SMI handler for AP.
592
593 @param CpuIndex AP processor Index.
594 @param ValidSmi Indicates whether the current SMI is a valid SMI.
595 @param SyncMode SMM MP sync mode.
596
597 **/
598 VOID
599 APHandler (
600 IN UINTN CpuIndex,
601 IN BOOLEAN ValidSmi,
602 IN SMM_CPU_SYNC_MODE SyncMode
603 )
604 {
605 UINT64 Timer;
606 UINTN BspIndex;
607 MTRR_SETTINGS Mtrrs;
608
609 //
610 // Timeout BSP
611 //
612 for (Timer = StartSyncTimer ();
613 !IsSyncTimerTimeout (Timer) &&
614 !(*mSmmMpSyncData->InsideSmm);
615 ) {
616 CpuPause ();
617 }
618
619 if (!(*mSmmMpSyncData->InsideSmm)) {
620 //
621 // BSP timeout in the first round
622 //
623 if (mSmmMpSyncData->BspIndex != -1) {
624 //
625 // BSP Index is known
626 //
627 BspIndex = mSmmMpSyncData->BspIndex;
628 ASSERT (CpuIndex != BspIndex);
629
630 //
631 // Send SMI IPI to bring BSP in
632 //
633 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
634
635 //
636 // Now wait for the BSP for a 2nd timeout period
637 //
638 for (Timer = StartSyncTimer ();
639 !IsSyncTimerTimeout (Timer) &&
640 !(*mSmmMpSyncData->InsideSmm);
641 ) {
642 CpuPause ();
643 }
644
645 if (!(*mSmmMpSyncData->InsideSmm)) {
646 //
647 // Give up since BSP is unable to enter SMM
648 // and signal the completion of this AP
649 WaitForSemaphore (mSmmMpSyncData->Counter);
650 return;
651 }
652 } else {
653 //
654 // Don't know BSP index. Give up without sending IPI to BSP.
655 //
656 WaitForSemaphore (mSmmMpSyncData->Counter);
657 return;
658 }
659 }
660
661 //
662 // BSP is available
663 //
664 BspIndex = mSmmMpSyncData->BspIndex;
665 ASSERT (CpuIndex != BspIndex);
666
667 //
668 // Mark this processor's presence
669 //
670 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
671
672 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
673 //
674 // Notify BSP of arrival at this point
675 //
676 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
677 }
678
679 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
680 //
681 // Wait for the signal from BSP to backup MTRRs
682 //
683 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
684
685 //
686 // Backup OS MTRRs
687 //
688 MtrrGetAllMtrrs(&Mtrrs);
689
690 //
691 // Signal BSP the completion of this AP
692 //
693 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
694
695 //
696 // Wait for BSP's signal to program MTRRs
697 //
698 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
699
700 //
701 // Replace OS MTRRs with SMI MTRRs
702 //
703 ReplaceOSMtrrs (CpuIndex);
704
705 //
706 // Signal BSP the completion of this AP
707 //
708 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
709 }
710
711 while (TRUE) {
712 //
713 // Wait for something to happen
714 //
715 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
716
717 //
718 // Check if BSP wants to exit SMM
719 //
720 if (!(*mSmmMpSyncData->InsideSmm)) {
721 break;
722 }
723
724 //
725 // BUSY should be acquired by SmmStartupThisAp()
726 //
727 ASSERT (
728 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
729 );
730
731 //
732 // Invoke the scheduled procedure
733 //
734 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
735 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
736 );
737
738 //
739 // Release BUSY
740 //
741 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
742 }
743
744 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
745 //
746 // Notify BSP the readiness of this AP to program MTRRs
747 //
748 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
749
750 //
751 // Wait for the signal from BSP to program MTRRs
752 //
753 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
754
755 //
756 // Restore OS MTRRs
757 //
758 SmmCpuFeaturesReenableSmrr ();
759 MtrrSetAllMtrrs(&Mtrrs);
760 }
761
762 //
763 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
764 //
765 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
766
767 //
768 // Wait for the signal from BSP to Reset states/semaphore for this processor
769 //
770 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
771
772 //
773 // Reset states/semaphore for this processor
774 //
775 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
776
777 //
778 // Notify BSP the readiness of this AP to exit SMM
779 //
780 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
781
782 }
783
784 /**
785 Create 4G PageTable in SMRAM.
786
787 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
788 @return PageTable Address
789
790 **/
791 UINT32
792 Gen4GPageTable (
793 IN BOOLEAN Is32BitPageTable
794 )
795 {
796 VOID *PageTable;
797 UINTN Index;
798 UINT64 *Pte;
799 UINTN PagesNeeded;
800 UINTN Low2MBoundary;
801 UINTN High2MBoundary;
802 UINTN Pages;
803 UINTN GuardPage;
804 UINT64 *Pdpte;
805 UINTN PageIndex;
806 UINTN PageAddress;
807
808 Low2MBoundary = 0;
809 High2MBoundary = 0;
810 PagesNeeded = 0;
811 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
812 //
813 // Add one more page for known good stack, then find the lower 2MB aligned address.
814 //
815 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
816 //
817 // Add two more pages for known good stack and stack guard page,
818 // then find the lower 2MB aligned address.
819 //
820 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
821 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
822 }
823 //
824 // Allocate the page table
825 //
826 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
827 ASSERT (PageTable != NULL);
828
829 PageTable = (VOID *)((UINTN)PageTable);
830 Pte = (UINT64*)PageTable;
831
832 //
833 // Zero out all page table entries first
834 //
835 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
836
837 //
838 // Set Page Directory Pointers
839 //
840 for (Index = 0; Index < 4; Index++) {
841 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
842 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
843 }
844 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
845
846 //
847 // Fill in Page Directory Entries
848 //
849 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
850 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
851 }
852
853 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
854 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
855 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
856 Pdpte = (UINT64*)PageTable;
857 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
858 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
859 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
860 //
861 // Fill in Page Table Entries
862 //
863 Pte = (UINT64*)Pages;
864 PageAddress = PageIndex;
865 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
866 if (PageAddress == GuardPage) {
867 //
868 // Mark the guard page as non-present
869 //
870 Pte[Index] = PageAddress | mAddressEncMask;
871 GuardPage += mSmmStackSize;
872 if (GuardPage > mSmmStackArrayEnd) {
873 GuardPage = 0;
874 }
875 } else {
876 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
877 }
878 PageAddress+= EFI_PAGE_SIZE;
879 }
880 Pages += EFI_PAGE_SIZE;
881 }
882 }
883
884 return (UINT32)(UINTN)PageTable;
885 }
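//
// Index math used by the stack-guard mapping above, shown for one hypothetical
// 2MB-aligned address (0x40200000) purely as an illustration:
//
//   PDPTE index = BitFieldRead32 (0x40200000, 30, 31) = 1   // which 1GB region
//   PDE   index = BitFieldRead32 (0x40200000, 21, 29) = 1   // which 2MB page within it
//
// i.e. the 4KB page table allocated for that 2MB window is hooked into the
// second entry of the page directory referenced by the second PDPTE.
//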
886
887 /**
888 Schedule a procedure to run on the specified CPU.
889
890 @param[in] Procedure The address of the procedure to run
891 @param[in] CpuIndex Target CPU Index
892 @param[in, out] ProcArguments The parameter to pass to the procedure
893 @param[in] BlockingMode Startup AP in blocking mode or not
894
895 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
896 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
897 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
898 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
899 @retval EFI_SUCCESS The procedure has been successfully scheduled
900
901 **/
902 EFI_STATUS
903 InternalSmmStartupThisAp (
904 IN EFI_AP_PROCEDURE Procedure,
905 IN UINTN CpuIndex,
906 IN OUT VOID *ProcArguments OPTIONAL,
907 IN BOOLEAN BlockingMode
908 )
909 {
910 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
911 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
912 return EFI_INVALID_PARAMETER;
913 }
914 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
915 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
916 return EFI_INVALID_PARAMETER;
917 }
918 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
919 return EFI_INVALID_PARAMETER;
920 }
921 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
922 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
923 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
924 }
925 return EFI_INVALID_PARAMETER;
926 }
927 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
928 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
929 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
930 }
931 return EFI_INVALID_PARAMETER;
932 }
933
934 if (BlockingMode) {
935 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
936 } else {
937 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
938 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
939 return EFI_INVALID_PARAMETER;
940 }
941 }
942
943 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
944 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
945 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
946
947 if (BlockingMode) {
948 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
949 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
950 }
951 return EFI_SUCCESS;
952 }
953
954 /**
955 Schedule a procedure to run on the specified CPU in blocking mode.
956
957 @param[in] Procedure The address of the procedure to run
958 @param[in] CpuIndex Target CPU Index
959 @param[in, out] ProcArguments The parameter to pass to the procedure
960
961 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
962 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
963 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
964 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
965 @retval EFI_SUCCESS The procedure has been successfully scheduled
966
967 **/
968 EFI_STATUS
969 EFIAPI
970 SmmBlockingStartupThisAp (
971 IN EFI_AP_PROCEDURE Procedure,
972 IN UINTN CpuIndex,
973 IN OUT VOID *ProcArguments OPTIONAL
974 )
975 {
976 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
977 }
978
979 /**
980 Schedule a procedure to run on the specified CPU.
981
982 @param Procedure The address of the procedure to run
983 @param CpuIndex Target CPU Index
984 @param ProcArguments The parameter to pass to the procedure
985
986 @retval EFI_INVALID_PARAMETER CpuIndex is not valid
987 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
988 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
989 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
990 @retval EFI_SUCCESS The procedure has been successfully scheduled
991
992 **/
993 EFI_STATUS
994 EFIAPI
995 SmmStartupThisAp (
996 IN EFI_AP_PROCEDURE Procedure,
997 IN UINTN CpuIndex,
998 IN OUT VOID *ProcArguments OPTIONAL
999 )
1000 {
1001 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
1002 }
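//
// Hypothetical caller sketch (ExampleApProc and ExampleContext are illustrative
// names, not part of this driver): a BSP-side SMI handler schedules work on
// AP 1 and blocks until it completes.
//
//   VOID
//   EFIAPI
//   ExampleApProc (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // runs on the target AP inside SMM
//   }
//   ...
//   Status = SmmBlockingStartupThisAp (ExampleApProc, 1, &ExampleContext);
//   ASSERT_EFI_ERROR (Status);
//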
1003
1004 /**
1005 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1006 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode to set them.
1007 
1008 NOTE: This might not be appropriate at runtime since it might
1009 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1010
1011 @param CpuIndex CPU Index
1012
1013 **/
1014 VOID
1015 EFIAPI
1016 CpuSmmDebugEntry (
1017 IN UINTN CpuIndex
1018 )
1019 {
1020 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1021
1022 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1023 ASSERT(CpuIndex < mMaxNumberOfCpus);
1024 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1025 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1026 AsmWriteDr6 (CpuSaveState->x86._DR6);
1027 AsmWriteDr7 (CpuSaveState->x86._DR7);
1028 } else {
1029 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1030 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1031 }
1032 }
1033 }
1034
1035 /**
1036 This function restores DR6 & DR7 to SMM save state.
1037
1038 NOTE: This might not be appropriate at runtime since it might
1039 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1040
1041 @param CpuIndex CPU Index
1042
1043 **/
1044 VOID
1045 EFIAPI
1046 CpuSmmDebugExit (
1047 IN UINTN CpuIndex
1048 )
1049 {
1050 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1051
1052 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1053 ASSERT(CpuIndex < mMaxNumberOfCpus);
1054 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1055 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1056 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1057 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1058 } else {
1059 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1060 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1061 }
1062 }
1063 }
1064
1065 /**
1066 C function for SMI entry, each processor comes here upon SMI trigger.
1067
1068 @param CpuIndex CPU Index
1069
1070 **/
1071 VOID
1072 EFIAPI
1073 SmiRendezvous (
1074 IN UINTN CpuIndex
1075 )
1076 {
1077 EFI_STATUS Status;
1078 BOOLEAN ValidSmi;
1079 BOOLEAN IsBsp;
1080 BOOLEAN BspInProgress;
1081 UINTN Index;
1082 UINTN Cr2;
1083
1084 ASSERT(CpuIndex < mMaxNumberOfCpus);
1085
1086 //
1087 // Save Cr2 because Page Fault exception in SMM may override its value
1088 //
1089 Cr2 = AsmReadCr2 ();
1090
1091 //
1092 // Perform CPU specific entry hooks
1093 //
1094 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1095
1096 //
1097 // Determine if this is a valid SMI
1098 //
1099 ValidSmi = PlatformValidSmi();
1100
1101 //
1102 // Determine if BSP has been already in progress. Note this must be checked after
1103 // ValidSmi because BSP may clear a valid SMI source after checking in.
1104 //
1105 BspInProgress = *mSmmMpSyncData->InsideSmm;
1106
1107 if (!BspInProgress && !ValidSmi) {
1108 //
1109 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1110 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1111 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1112 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1113 // is nothing we need to do.
1114 //
1115 goto Exit;
1116 } else {
1117 //
1118 // Signal presence of this processor
1119 //
1120 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1121 //
1122 // BSP has already ended the synchronization, so QUIT!!!
1123 //
1124
1125 //
1126 // Wait for BSP's signal to finish SMI
1127 //
1128 while (*mSmmMpSyncData->AllCpusInSync) {
1129 CpuPause ();
1130 }
1131 goto Exit;
1132 } else {
1133
1134 //
1135 // The BUSY lock is initialized to Released state.
1136 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1137 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1138 // after AP's present flag is detected.
1139 //
1140 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1141 }
1142
1143 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1144 ActivateSmmProfile (CpuIndex);
1145 }
1146
1147 if (BspInProgress) {
1148 //
1149 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1150 // as BSP may have cleared the SMI status
1151 //
1152 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1153 } else {
1154 //
1155 // We have a valid SMI
1156 //
1157
1158 //
1159 // Elect BSP
1160 //
1161 IsBsp = FALSE;
1162 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1163 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1164 //
1165 // Call platform hook to do BSP election
1166 //
1167 Status = PlatformSmmBspElection (&IsBsp);
1168 if (EFI_SUCCESS == Status) {
1169 //
1170 // Platform hook determines successfully
1171 //
1172 if (IsBsp) {
1173 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1174 }
1175 } else {
1176 //
1177 // Platform hook fails to determine, use default BSP election method
1178 //
1179 InterlockedCompareExchange32 (
1180 (UINT32*)&mSmmMpSyncData->BspIndex,
1181 (UINT32)-1,
1182 (UINT32)CpuIndex
1183 );
1184 }
1185 }
1186 }
1187
1188 //
1189 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1190 //
1191 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1192
1193 //
1194 // Clear last request for SwitchBsp.
1195 //
1196 if (mSmmMpSyncData->SwitchBsp) {
1197 mSmmMpSyncData->SwitchBsp = FALSE;
1198 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1199 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1200 }
1201 }
1202
1203 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1204 SmmProfileRecordSmiNum ();
1205 }
1206
1207 //
1208 // BSP Handler is always called with a ValidSmi == TRUE
1209 //
1210 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1211 } else {
1212 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1213 }
1214 }
1215
1216 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1217
1218 //
1219 // Wait for BSP's signal to exit SMI
1220 //
1221 while (*mSmmMpSyncData->AllCpusInSync) {
1222 CpuPause ();
1223 }
1224 }
1225
1226 Exit:
1227 SmmCpuFeaturesRendezvousExit (CpuIndex);
1228 //
1229 // Restore Cr2
1230 //
1231 AsmWriteCr2 (Cr2);
1232 }
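//
// Decision flow of SmiRendezvous() in brief (a summary of the code above, not
// additional logic):
//
//   if (!BspInProgress && !ValidSmi)            -> exit immediately
//   else check in via ReleaseSemaphore (Counter)
//     returned 0 (counter locked down)          -> BSP already closed sync; wait for AllCpusInSync to clear, then exit
//     BspInProgress                             -> APHandler ()
//     elected BSP (BspIndex == CpuIndex)        -> BSPHandler ()
//     otherwise                                 -> APHandler ()
//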
1233
1234 /**
1235 Allocate buffer for all semaphores and spin locks.
1236
1237 **/
1238 VOID
1239 InitializeSmmCpuSemaphores (
1240 VOID
1241 )
1242 {
1243 UINTN ProcessorCount;
1244 UINTN TotalSize;
1245 UINTN GlobalSemaphoresSize;
1246 UINTN CpuSemaphoresSize;
1247 UINTN MsrSemaphoreSize;
1248 UINTN SemaphoreSize;
1249 UINTN Pages;
1250 UINTN *SemaphoreBlock;
1251 UINTN SemaphoreAddr;
1252
1253 SemaphoreSize = GetSpinLockProperties ();
1254 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1255 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1256 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1257 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
1258 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
1259 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1260 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1261 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1262 SemaphoreBlock = AllocatePages (Pages);
1263 ASSERT (SemaphoreBlock != NULL);
1264 ZeroMem (SemaphoreBlock, TotalSize);
1265
1266 SemaphoreAddr = (UINTN)SemaphoreBlock;
1267 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1268 SemaphoreAddr += SemaphoreSize;
1269 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1270 SemaphoreAddr += SemaphoreSize;
1271 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1272 SemaphoreAddr += SemaphoreSize;
1273 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1274 SemaphoreAddr += SemaphoreSize;
1275 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1276 = (SPIN_LOCK *)SemaphoreAddr;
1277 SemaphoreAddr += SemaphoreSize;
1278 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
1279 = (SPIN_LOCK *)SemaphoreAddr;
1280
1281 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1282 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1283 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1284 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1285 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1286 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1287
1288 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
1289 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
1290 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
1291 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
1292 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
1293
1294 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1295 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1296 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
1297
1298 mSemaphoreSize = SemaphoreSize;
1299 }
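//
// Resulting layout of the allocated semaphore block (each entry occupies one
// slot of SemaphoreSize bytes, as returned by GetSpinLockProperties ()):
//
//   Global   : Counter | InsideSmm | AllCpusInSync | PFLock | CodeAccessCheckLock | MemoryMappedLock
//   Per-CPU  : Busy[0..N-1] | Run[0..N-1] | Present[0..N-1]        (N = ProcessorCount)
//   MSR locks: MSR_SPIN_LOCK_INIT_NUM (or more) spin-lock slots up to the end of the allocation
//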
1300
1301 /**
1302 Initialize uncacheable data.
1303
1304 **/
1305 VOID
1306 EFIAPI
1307 InitializeMpSyncData (
1308 VOID
1309 )
1310 {
1311 UINTN CpuIndex;
1312
1313 if (mSmmMpSyncData != NULL) {
1314 //
1315 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1316 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1317 //
1318 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1319 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1320 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1321 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1322 //
1323 // Enable BSP election by setting BspIndex to -1
1324 //
1325 mSmmMpSyncData->BspIndex = (UINT32)-1;
1326 }
1327 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1328
1329 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1330 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1331 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1332 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1333 mSmmMpSyncData->AllCpusInSync != NULL);
1334 *mSmmMpSyncData->Counter = 0;
1335 *mSmmMpSyncData->InsideSmm = FALSE;
1336 *mSmmMpSyncData->AllCpusInSync = FALSE;
1337
1338 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1339 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1340 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1341 mSmmMpSyncData->CpuData[CpuIndex].Run =
1342 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1343 mSmmMpSyncData->CpuData[CpuIndex].Present =
1344 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1345 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1346 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1347 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1348 }
1349 }
1350 }
1351
1352 /**
1353 Initialize global data for MP synchronization.
1354
1355 @param Stacks Base address of SMI stack buffer for all processors.
1356 @param StackSize Stack size for each processor in SMM.
1357
1358 **/
1359 UINT32
1360 InitializeMpServiceData (
1361 IN VOID *Stacks,
1362 IN UINTN StackSize
1363 )
1364 {
1365 UINT32 Cr3;
1366 UINTN Index;
1367 UINT8 *GdtTssTables;
1368 UINTN GdtTableStepSize;
1369
1370 //
1371 // Allocate memory for all locks and semaphores
1372 //
1373 InitializeSmmCpuSemaphores ();
1374
1375 //
1376 // Initialize mSmmMpSyncData
1377 //
1378 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1379 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1380 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1381 ASSERT (mSmmMpSyncData != NULL);
1382 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1383 InitializeMpSyncData ();
1384
1385 //
1386 // Initialize physical address mask
1387 // NOTE: Physical memory above virtual address limit is not supported !!!
1388 //
1389 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1390 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1391 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1392
1393 //
1394 // Create page tables
1395 //
1396 Cr3 = SmmInitPageTable ();
1397
1398 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1399
1400 //
1401 // Install SMI handler for each CPU
1402 //
1403 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1404 InstallSmiHandler (
1405 Index,
1406 (UINT32)mCpuHotPlugData.SmBase[Index],
1407 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1408 StackSize,
1409 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1410 gcSmiGdtr.Limit + 1,
1411 gcSmiIdtr.Base,
1412 gcSmiIdtr.Limit + 1,
1413 Cr3
1414 );
1415 }
1416
1417 //
1418 // Record current MTRR settings
1419 //
1420 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1421 MtrrGetAllMtrrs (&gSmiMtrrs);
1422
1423 return Cr3;
1424 }
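//
// Worked example of the physical address mask computed above (illustrative
// values only): with CPUID leaf 80000008h reporting 36 physical address bits,
//
//   gPhyMask  = (1 << 36) - 1                   = 0x0000000FFFFFFFFF
//   gPhyMask &= (1 << 48) - EFI_PAGE_SIZE       = 0x0000FFFFFFFFF000
//   result                                      = 0x0000000FFFFFF000
//
// i.e. only page-frame-number bits remain, capped at 48 address bits.
//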
1425
1426 /**
1427
1428 Register the SMM Foundation entry point.
1429
1430 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1431 @param SmmEntryPoint SMM Foundation EntryPoint
1432
1433 @retval EFI_SUCCESS The SMM Foundation entry point was successfully registered
1434
1435 **/
1436 EFI_STATUS
1437 EFIAPI
1438 RegisterSmmEntry (
1439 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1440 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1441 )
1442 {
1443 //
1444 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1445 //
1446 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1447 return EFI_SUCCESS;
1448 }