UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // Slots for all MTRRs (fixed MTRRs + a base/mask pair per variable MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
19 //
20 UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
21 UINT64 gPhyMask;
22 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
23 UINTN mSmmMpSyncDataSize;
24
25 /**
26 Performs an atomic compare exchange operation to acquire a semaphore.
27 The compare exchange operation must be performed using
28 MP safe mechanisms.
29
30 @param Sem IN: 32-bit unsigned integer
31 OUT: original integer - 1
32 @return Original integer - 1
33
34 **/
35 UINT32
36 WaitForSemaphore (
37 IN OUT volatile UINT32 *Sem
38 )
39 {
40 UINT32 Value;
41
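//
// Spin while the semaphore is zero; otherwise try to decrement it atomically.
// The compare exchange retries if another processor changed the value between
// the read and the exchange.
//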
42 do {
43 Value = *Sem;
44 } while (Value == 0 ||
45 InterlockedCompareExchange32 (
46 (UINT32*)Sem,
47 Value,
48 Value - 1
49 ) != Value);
50 return Value - 1;
51 }
52
53
54 /**
55 Performs an atomic compare exchange operation to release semaphore.
56 The compare exchange operation must be performed using
57 MP safe mechanisms.
58
59 @param Sem IN: 32-bit unsigned integer
60 OUT: original integer + 1
61 @return Original integer + 1
62
63 **/
64 UINT32
65 ReleaseSemaphore (
66 IN OUT volatile UINT32 *Sem
67 )
68 {
69 UINT32 Value;
70
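//
// Atomically increment the semaphore. If it has been locked down to MAX_UINT32
// by LockdownSemaphore(), the increment would wrap to 0, so the loop exits
// without modifying it and the caller sees a return value of 0.
//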
71 do {
72 Value = *Sem;
73 } while (Value + 1 != 0 &&
74 InterlockedCompareExchange32 (
75 (UINT32*)Sem,
76 Value,
77 Value + 1
78 ) != Value);
79 return Value + 1;
80 }
81
82 /**
83 Performs an atomic compare exchange operation to lock semaphore.
84 The compare exchange operation must be performed using
85 MP safe mechanisms.
86
87 @param Sem IN: 32-bit unsigned integer
88 OUT: -1
89 @return Original integer
90
91 **/
92 UINT32
93 LockdownSemaphore (
94 IN OUT volatile UINT32 *Sem
95 )
96 {
97 UINT32 Value;
98
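//
// Atomically replace the semaphore value with MAX_UINT32 so that later
// ReleaseSemaphore() calls from late arrivals fail, and return the value
// captured at lockdown time.
//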
99 do {
100 Value = *Sem;
101 } while (InterlockedCompareExchange32 (
102 (UINT32*)Sem,
103 Value, (UINT32)-1
104 ) != Value);
105 return Value;
106 }
107
108 /**
109 Wait for all APs to perform an atomic compare exchange operation that releases the semaphore.
110
111 @param NumberOfAPs Number of APs to wait for
112
113 **/
114 VOID
115 WaitForAllAPs (
116 IN UINTN NumberOfAPs
117 )
118 {
119 UINTN BspIndex;
120
121 BspIndex = mSmmMpSyncData->BspIndex;
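//
// Each AP signals completion by releasing the BSP's Run semaphore once;
// consume one count per expected AP.
//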
122 while (NumberOfAPs-- > 0) {
123 WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
124 }
125 }
126
127 /**
128 Performs an atomic compare exchange operation to release semaphore
129 for each AP.
130
131 **/
132 VOID
133 ReleaseAllAPs (
134 VOID
135 )
136 {
137 UINTN Index;
138 UINTN BspIndex;
139
140 BspIndex = mSmmMpSyncData->BspIndex;
141 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
142 if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
143 ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
144 }
145 }
146 }
147
148 /**
149 Checks if all CPUs (with certain exceptions) have checked in for this SMI run.
150
151 @param Exceptions CPU Arrival exception flags.
152
153 @retval TRUE if all CPUs have checked in.
154 @retval FALSE if at least one Normal AP hasn't checked in.
155
156 **/
157 BOOLEAN
158 AllCpusInSmmWithExceptions (
159 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
160 )
161 {
162 UINTN Index;
163 SMM_CPU_DATA_BLOCK *CpuData;
164 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
165
166 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
167
168 if (mSmmMpSyncData->Counter == mNumberOfCpus) {
169 return TRUE;
170 }
171
172 CpuData = mSmmMpSyncData->CpuData;
173 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
174 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
175 if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
176 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
177 continue;
178 }
179 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
180 continue;
181 }
182 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
183 continue;
184 }
185 return FALSE;
186 }
187 }
188
189
190 return TRUE;
191 }
192
193
194 /**
195 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal-mode code before
196 entering SMM, except for SMI-disabled APs.
197
198 **/
199 VOID
200 SmmWaitForApArrival (
201 VOID
202 )
203 {
204 UINT64 Timer;
205 UINTN Index;
206
207 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
208
209 //
210 // The platform implementor should choose the timeout value appropriately:
211 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
212 // that SMI handlers must ALWAYS take into account the case that not all APs are available in an SMI run.
213 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give all APs time to receive the SMI IPI
214 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
215 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
216 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
217 // - The timeout value must be longer than the longest possible I/O operation in the system.
218 //
219
220 //
221 // Sync with APs 1st timeout
222 //
223 for (Timer = StartSyncTimer ();
224 !IsSyncTimerTimeout (Timer) &&
225 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
226 ) {
227 CpuPause ();
228 }
229
230 //
231 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
232 // because:
233 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
234 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
235 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
236 // work while SMI handling is on-going.
237 // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
238 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
239 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
240 // mode work while SMI handling is on-going.
241 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
242 // - In traditional flow, SMI disabling is discouraged.
243 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
244 // In both cases, adding SMI-disabling checking code increases overhead.
245 //
246 if (mSmmMpSyncData->Counter < mNumberOfCpus) {
247 //
248 // Send SMI IPIs to bring outside processors in
249 //
250 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
251 if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
252 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
253 }
254 }
255
256 //
257 // Sync with APs 2nd timeout.
258 //
259 for (Timer = StartSyncTimer ();
260 !IsSyncTimerTimeout (Timer) &&
261 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
262 ) {
263 CpuPause ();
264 }
265 }
266
267 return;
268 }
269
270
271 /**
272 Replace OS MTRRs with SMI MTRRs.
273
274 @param CpuIndex Processor Index
275
276 **/
277 VOID
278 ReplaceOSMtrrs (
279 IN UINTN CpuIndex
280 )
281 {
282 PROCESSOR_SMM_DESCRIPTOR *Psd;
283 UINT64 *SmiMtrrs;
284 MTRR_SETTINGS *BiosMtrr;
285
286 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
287 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
288
289 SmmCpuFeaturesDisableSmrr ();
290
291 //
292 // Replace all MTRRs registers
293 //
294 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
295 MtrrSetAllMtrrs(BiosMtrr);
296 }
297
298 /**
299 SMI handler for BSP.
300
301 @param CpuIndex BSP processor Index
302 @param SyncMode SMM MP sync mode
303
304 **/
305 VOID
306 BSPHandler (
307 IN UINTN CpuIndex,
308 IN SMM_CPU_SYNC_MODE SyncMode
309 )
310 {
311 UINTN Index;
312 MTRR_SETTINGS Mtrrs;
313 UINTN ApCount;
314 BOOLEAN ClearTopLevelSmiResult;
315 UINTN PresentCount;
316
317 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
318 ApCount = 0;
319
320 //
321 // Flag BSP's presence
322 //
323 mSmmMpSyncData->InsideSmm = TRUE;
324
325 //
326 // Initialize Debug Agent to start source level debug in BSP handler
327 //
328 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
329
330 //
331 // Mark this processor's presence
332 //
333 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
334
335 //
336 // Clear platform top level SMI status bit before calling SMI handlers. If
337 // we cleared it after SMI handlers are run, we would miss the SMI that
338 // occurs after SMI handlers are done and before SMI status bit is cleared.
339 //
340 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
341 ASSERT (ClearTopLevelSmiResult == TRUE);
342
343 //
344 // Set running processor index
345 //
346 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
347
348 //
349 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
350 //
351 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
352
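//
// Rendezvous sequence with the APs (mirrored in APHandler()):
// 1. Wait for all APs to check in, then lock the arrival counter down.
// 2. If MTRR configuration is needed, step the APs through backing up the
//    OS MTRRs and programming the SMM MTRRs in lock-step, using the per-CPU
//    Run semaphores in both directions.
//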
353 //
354 // Wait for APs to arrive
355 //
356 SmmWaitForApArrival();
357
358 //
359 // Lock the counter down and retrieve the number of APs
360 //
361 mSmmMpSyncData->AllCpusInSync = TRUE;
362 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
363
364 //
365 // Wait for all APs to get ready for programming MTRRs
366 //
367 WaitForAllAPs (ApCount);
368
369 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
370 //
371 // Signal all APs it's time for backup MTRRs
372 //
373 ReleaseAllAPs ();
374
375 //
376 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
377 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
378 // to a large enough value to avoid this situation.
379 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
380 // We do the backup first and then set MTRR to avoid race condition for threads
381 // in the same core.
382 //
383 MtrrGetAllMtrrs(&Mtrrs);
384
385 //
386 // Wait for all APs to complete their MTRR saving
387 //
388 WaitForAllAPs (ApCount);
389
390 //
391 // Let all processors program SMM MTRRs together
392 //
393 ReleaseAllAPs ();
394
395 //
396 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
397 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
398 // to a large enough value to avoid this situation.
399 //
400 ReplaceOSMtrrs (CpuIndex);
401
402 //
403 // Wait for all APs to complete their MTRR programming
404 //
405 WaitForAllAPs (ApCount);
406 }
407 }
408
409 //
410 // The BUSY lock is initialized to Acquired state
411 //
412 AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
413
414 //
415 // Restore SMM Configuration in S3 boot path.
416 //
417 if (mRestoreSmmConfigurationInS3) {
418 //
419 // Configure SMM Code Access Check feature if available.
420 //
421 ConfigSmmCodeAccessCheck ();
422 mRestoreSmmConfigurationInS3 = FALSE;
423 }
424
425 //
426 // Invoke SMM Foundation EntryPoint with the processor information context.
427 //
428 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
429
430 //
431 // Make sure all APs have completed their pending non-blocking tasks
432 //
433 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
434 if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
435 AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
436 ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
437 }
438 }
439
440 //
441 // Perform the remaining tasks
442 //
443 PerformRemainingTasks ();
444
445 //
446 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
447 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
448 // will run through freely.
449 //
450 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
451
452 //
453 // Lock the counter down and retrieve the number of APs
454 //
455 mSmmMpSyncData->AllCpusInSync = TRUE;
456 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
457 //
458 // Make sure all APs have their Present flag set
459 //
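//
// Note: the Present flags include the BSP's own flag, which was set earlier,
// so the loop waits until PresentCount exceeds ApCount (the AP count captured
// at lockdown time).
//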
460 while (TRUE) {
461 PresentCount = 0;
462 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
463 if (mSmmMpSyncData->CpuData[Index].Present) {
464 PresentCount ++;
465 }
466 }
467 if (PresentCount > ApCount) {
468 break;
469 }
470 }
471 }
472
473 //
474 // Notify all APs to exit
475 //
476 mSmmMpSyncData->InsideSmm = FALSE;
477 ReleaseAllAPs ();
478
479 //
480 // Wait for all APs to complete their pending tasks
481 //
482 WaitForAllAPs (ApCount);
483
484 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
485 //
486 // Signal APs to restore MTRRs
487 //
488 ReleaseAllAPs ();
489
490 //
491 // Restore OS MTRRs
492 //
493 SmmCpuFeaturesReenableSmrr ();
494 MtrrSetAllMtrrs(&Mtrrs);
495
496 //
497 // Wait for all APs to complete MTRR programming
498 //
499 WaitForAllAPs (ApCount);
500 }
501
502 //
503 // Stop source-level debug in the BSP handler; the code below will not be
504 // debugged.
505 //
506 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
507
508 //
509 // Signal APs to Reset states/semaphore for this processor
510 //
511 ReleaseAllAPs ();
512
513 //
514 // Perform pending operations for hot-plug
515 //
516 SmmCpuUpdate ();
517
518 //
519 // Clear the Present flag of BSP
520 //
521 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
522
523 //
524 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
525 // WaitForAllAPs() does not depend on the Present flag.
526 //
527 WaitForAllAPs (ApCount);
528
529 //
530 // Reset BspIndex to -1, meaning BSP has not been elected.
531 //
532 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
533 mSmmMpSyncData->BspIndex = (UINT32)-1;
534 }
535
536 //
537 // Allow APs to check in from this point on
538 //
539 mSmmMpSyncData->Counter = 0;
540 mSmmMpSyncData->AllCpusInSync = FALSE;
541 }
542
543 /**
544 SMI handler for AP.
545
546 @param CpuIndex AP processor Index.
547 @param ValidSmi Indicates whether the current SMI is a valid SMI.
548 @param SyncMode SMM MP sync mode.
549
550 **/
551 VOID
552 APHandler (
553 IN UINTN CpuIndex,
554 IN BOOLEAN ValidSmi,
555 IN SMM_CPU_SYNC_MODE SyncMode
556 )
557 {
558 UINT64 Timer;
559 UINTN BspIndex;
560 MTRR_SETTINGS Mtrrs;
561
562 //
563 // Wait for the BSP to enter SMM, with timeout
564 //
565 for (Timer = StartSyncTimer ();
566 !IsSyncTimerTimeout (Timer) &&
567 !mSmmMpSyncData->InsideSmm;
568 ) {
569 CpuPause ();
570 }
571
572 if (!mSmmMpSyncData->InsideSmm) {
573 //
574 // BSP timeout in the first round
575 //
576 if (mSmmMpSyncData->BspIndex != -1) {
577 //
578 // BSP Index is known
579 //
580 BspIndex = mSmmMpSyncData->BspIndex;
581 ASSERT (CpuIndex != BspIndex);
582
583 //
584 // Send SMI IPI to bring BSP in
585 //
586 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
587
588 //
589 // Now wait for the BSP a 2nd time, with timeout
590 //
591 for (Timer = StartSyncTimer ();
592 !IsSyncTimerTimeout (Timer) &&
593 !mSmmMpSyncData->InsideSmm;
594 ) {
595 CpuPause ();
596 }
597
598 if (!mSmmMpSyncData->InsideSmm) {
599 //
600 // Give up since BSP is unable to enter SMM
601 // and signal the completion of this AP
602 WaitForSemaphore (&mSmmMpSyncData->Counter);
603 return;
604 }
605 } else {
606 //
607 // Don't know BSP index. Give up without sending IPI to BSP.
608 //
609 WaitForSemaphore (&mSmmMpSyncData->Counter);
610 return;
611 }
612 }
613
614 //
615 // BSP is available
616 //
617 BspIndex = mSmmMpSyncData->BspIndex;
618 ASSERT (CpuIndex != BspIndex);
619
620 //
621 // Mark this processor's presence
622 //
623 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
624
625 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
626 //
627 // Notify BSP of arrival at this point
628 //
629 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
630 }
631
632 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
633 //
634 // Wait for the signal from BSP to backup MTRRs
635 //
636 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
637
638 //
639 // Backup OS MTRRs
640 //
641 MtrrGetAllMtrrs(&Mtrrs);
642
643 //
644 // Signal BSP the completion of this AP
645 //
646 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
647
648 //
649 // Wait for BSP's signal to program MTRRs
650 //
651 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
652
653 //
654 // Replace OS MTRRs with SMI MTRRs
655 //
656 ReplaceOSMtrrs (CpuIndex);
657
658 //
659 // Signal BSP the completion of this AP
660 //
661 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
662 }
663
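//
// Service procedures scheduled by the BSP via SmmStartupThisAp() until the
// BSP clears InsideSmm to signal the end of this SMI.
//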
664 while (TRUE) {
665 //
666 // Wait for something to happen
667 //
668 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
669
670 //
671 // Check if BSP wants to exit SMM
672 //
673 if (!mSmmMpSyncData->InsideSmm) {
674 break;
675 }
676
677 //
678 // BUSY should be acquired by SmmStartupThisAp()
679 //
680 ASSERT (
681 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
682 );
683
684 //
685 // Invoke the scheduled procedure
686 //
687 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
688 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
689 );
690
691 //
692 // Release BUSY
693 //
694 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
695 }
696
697 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
698 //
699 // Notify BSP the readiness of this AP to program MTRRs
700 //
701 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
702
703 //
704 // Wait for the signal from BSP to program MTRRs
705 //
706 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
707
708 //
709 // Restore OS MTRRs
710 //
711 SmmCpuFeaturesReenableSmrr ();
712 MtrrSetAllMtrrs(&Mtrrs);
713 }
714
715 //
716 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
717 //
718 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
719
720 //
721 // Wait for the signal from BSP to Reset states/semaphore for this processor
722 //
723 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
724
725 //
726 // Reset states/semaphore for this processor
727 //
728 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
729
730 //
731 // Notify BSP the readiness of this AP to exit SMM
732 //
733 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
734
735 }
736
737 /**
738 Create a 4GB page table in SMRAM.
739
740 @param ExtraPages Number of additional pages to allocate in front of the 4GB page table
741 @return Page table address
742
743 **/
744 UINT32
745 Gen4GPageTable (
746 IN UINTN ExtraPages
747 )
748 {
749 VOID *PageTable;
750 UINTN Index;
751 UINT64 *Pte;
752 UINTN PagesNeeded;
753 UINTN Low2MBoundary;
754 UINTN High2MBoundary;
755 UINTN Pages;
756 UINTN GuardPage;
757 UINT64 *Pdpte;
758 UINTN PageIndex;
759 UINTN PageAddress;
760
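//
// Layout of the allocation (after the ExtraPages placed in front):
// page 0     : page directory pointer table with 4 entries, one per GB
// pages 1..4 : page directories mapping 0-4GB with 2MB pages
// remaining  : 4KB-granularity page tables covering the SMI stack area, used
//              to mark stack guard pages non-present when PcdCpuSmmStackGuard
//              is set
//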
761 Low2MBoundary = 0;
762 High2MBoundary = 0;
763 PagesNeeded = 0;
764 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
765 //
766 // Add one more page for known good stack, then find the lower 2MB aligned address.
767 //
768 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
769 //
770 // Add two more pages for known good stack and stack guard page,
771 // then find the lower 2MB aligned address.
772 //
773 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
774 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
775 }
776 //
777 // Allocate the page table
778 //
779 PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);
780 ASSERT (PageTable != NULL);
781
782 PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
783 Pte = (UINT64*)PageTable;
784
785 //
786 // Zero out all page table entries first
787 //
788 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
789
790 //
791 // Set Page Directory Pointers
792 //
793 for (Index = 0; Index < 4; Index++) {
794 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;
795 }
796 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
797
798 //
799 // Fill in Page Directory Entries
800 //
801 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
802 Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;
803 }
804
805 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
806 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
807 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
808 Pdpte = (UINT64*)PageTable;
809 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
810 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
811 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;
812 //
813 // Fill in Page Table Entries
814 //
815 Pte = (UINT64*)Pages;
816 PageAddress = PageIndex;
817 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
818 if (PageAddress == GuardPage) {
819 //
820 // Mark the guard page as non-present
821 //
822 Pte[Index] = PageAddress;
823 GuardPage += mSmmStackSize;
824 if (GuardPage > mSmmStackArrayEnd) {
825 GuardPage = 0;
826 }
827 } else {
828 Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;
829 }
830 PageAddress+= EFI_PAGE_SIZE;
831 }
832 Pages += EFI_PAGE_SIZE;
833 }
834 }
835
836 return (UINT32)(UINTN)PageTable;
837 }
838
839 /**
840 Set memory cacheability.
841
842 @param PageTable Page table address
843 @param Address Memory address whose cacheability is to be changed
844 @param Cacheability Cacheability to set
845
846 **/
847 VOID
848 SetCacheability (
849 IN UINT64 *PageTable,
850 IN UINTN Address,
851 IN UINT8 Cacheability
852 )
853 {
854 UINTN PTIndex;
855 VOID *NewPageTableAddress;
856 UINT64 *NewPageTable;
857 UINTN Index;
858
859 ASSERT ((Address & EFI_PAGE_MASK) == 0);
860
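//
// Walk down the paging hierarchy: on X64, bits 47:39 of Address select the
// PML4 entry; bits 38:30 select the PDPT entry, bits 29:21 the page directory
// entry, and bits 20:12 the page table entry.
//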
861 if (sizeof (UINTN) == sizeof (UINT64)) {
862 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
863 ASSERT (PageTable[PTIndex] & IA32_PG_P);
864 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
865 }
866
867 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
868 ASSERT (PageTable[PTIndex] & IA32_PG_P);
869 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
870
871 //
872 // A perfect implementation should compare the original cacheability with the
873 // one being set, and break a 2MB page entry into pieces only when they
874 // disagree.
875 //
876 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
877 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
878 //
879 // Allocate a page from SMRAM
880 //
881 NewPageTableAddress = AllocatePages (1);
882 ASSERT (NewPageTableAddress != NULL);
883
884 NewPageTable = (UINT64 *)NewPageTableAddress;
885
886 for (Index = 0; Index < 0x200; Index++) {
887 NewPageTable[Index] = PageTable[PTIndex];
888 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
889 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
890 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
891 }
892 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
893 }
894
895 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;
896 }
897
898 ASSERT (PageTable[PTIndex] & IA32_PG_P);
899 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
900
901 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
902 ASSERT (PageTable[PTIndex] & IA32_PG_P);
903 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
904 PageTable[PTIndex] |= (UINT64)Cacheability;
905 }
906
907
908 /**
909 Schedule a procedure to run on the specified CPU.
910
911 @param Procedure The address of the procedure to run
912 @param CpuIndex Target CPU Index
913 @param ProcArguments The parameter to pass to the procedure
914
915 @retval EFI_INVALID_PARAMETER CpuIndex not valid
916 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
917 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
918 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
919 @retval EFI_SUCCESS The procedure has been successfully scheduled
920
921 **/
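Illustrative usage sketch (MyApProcedure and MyBuffer are hypothetical names,
not part of this driver):

  VOID
  EFIAPI
  MyApProcedure (
    IN OUT VOID  *Buffer
    );

  Status = SmmStartupThisAp (MyApProcedure, CpuIndex, &MyBuffer);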
922 EFI_STATUS
923 EFIAPI
924 SmmStartupThisAp (
925 IN EFI_AP_PROCEDURE Procedure,
926 IN UINTN CpuIndex,
927 IN OUT VOID *ProcArguments OPTIONAL
928 )
929 {
930 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
931 CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
932 !mSmmMpSyncData->CpuData[CpuIndex].Present ||
933 gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
934 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
935 return EFI_INVALID_PARAMETER;
936 }
937
938 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
939 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
940 ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
941
942 if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
943 AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
944 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
945 }
946 return EFI_SUCCESS;
947 }
948
949 /**
950 C function for SMI entry; each processor comes here upon SMI trigger.
951
952 @param CpuIndex CPU Index
953
954 **/
955 VOID
956 EFIAPI
957 SmiRendezvous (
958 IN UINTN CpuIndex
959 )
960 {
961 EFI_STATUS Status;
962 BOOLEAN ValidSmi;
963 BOOLEAN IsBsp;
964 BOOLEAN BspInProgress;
965 UINTN Index;
966 UINTN Cr2;
967
968 //
969 // Save Cr2 because Page Fault exception in SMM may override its value
970 //
971 Cr2 = AsmReadCr2 ();
972
973 //
974 // Perform CPU specific entry hooks
975 //
976 SmmCpuFeaturesRendezvousEntry (CpuIndex);
977
978 //
979 // Determine if this is a valid SMI
980 //
981 ValidSmi = PlatformValidSmi();
982
983 //
984 // Determine if the BSP is already in progress. Note this must be checked after
985 // ValidSmi because BSP may clear a valid SMI source after checking in.
986 //
987 BspInProgress = mSmmMpSyncData->InsideSmm;
988
989 if (!BspInProgress && !ValidSmi) {
990 //
991 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
992 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
993 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
994 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
995 // is nothing we need to do.
996 //
997 goto Exit;
998 } else {
999 //
1000 // Signal presence of this processor
1001 //
1002 if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
1003 //
1004 // BSP has already ended the synchronization, so QUIT!!!
1005 //
1006
1007 //
1008 // Wait for BSP's signal to finish SMI
1009 //
1010 while (mSmmMpSyncData->AllCpusInSync) {
1011 CpuPause ();
1012 }
1013 goto Exit;
1014 } else {
1015
1016 //
1017 // The BUSY lock is initialized to Released state.
1018 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1019 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1020 // after AP's present flag is detected.
1021 //
1022 InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
1023 }
1024
1025 //
1026 // Try to enable NX
1027 //
1028 if (mXdSupported) {
1029 ActivateXd ();
1030 }
1031
1032 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1033 ActivateSmmProfile (CpuIndex);
1034 }
1035
1036 if (BspInProgress) {
1037 //
1038 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1039 // as BSP may have cleared the SMI status
1040 //
1041 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1042 } else {
1043 //
1044 // We have a valid SMI
1045 //
1046
1047 //
1048 // Elect BSP
1049 //
1050 IsBsp = FALSE;
1051 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1052 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1053 //
1054 // Call platform hook to do BSP election
1055 //
1056 Status = PlatformSmmBspElection (&IsBsp);
1057 if (EFI_SUCCESS == Status) {
1058 //
1059 // Platform hook determines successfully
1060 //
1061 if (IsBsp) {
1062 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1063 }
1064 } else {
1065 //
1066 // Platform hook fails to determine, use default BSP election method
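// The first processor to exchange -1 for its own index becomes the BSP;
// later exchanges fail the compare and leave BspIndex unchanged.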
1067 //
1068 InterlockedCompareExchange32 (
1069 (UINT32*)&mSmmMpSyncData->BspIndex,
1070 (UINT32)-1,
1071 (UINT32)CpuIndex
1072 );
1073 }
1074 }
1075 }
1076
1077 //
1078 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1079 //
1080 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1081
1082 //
1083 // Clear last request for SwitchBsp.
1084 //
1085 if (mSmmMpSyncData->SwitchBsp) {
1086 mSmmMpSyncData->SwitchBsp = FALSE;
1087 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1088 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1089 }
1090 }
1091
1092 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1093 SmmProfileRecordSmiNum ();
1094 }
1095
1096 //
1097 // BSP Handler is always called with a ValidSmi == TRUE
1098 //
1099 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1100
1101 } else {
1102 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1103 }
1104 }
1105
1106 ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1107
1108 //
1109 // Wait for BSP's signal to exit SMI
1110 //
1111 while (mSmmMpSyncData->AllCpusInSync) {
1112 CpuPause ();
1113 }
1114 }
1115
1116 Exit:
1117 SmmCpuFeaturesRendezvousExit (CpuIndex);
1118 //
1119 // Restore Cr2
1120 //
1121 AsmWriteCr2 (Cr2);
1122 }
1123
1124
1125 /**
1126 Initialize uncacheable data.
1127
1128 **/
1129 VOID
1130 EFIAPI
1131 InitializeMpSyncData (
1132 VOID
1133 )
1134 {
1135 if (mSmmMpSyncData != NULL) {
1136 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
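//
// The buffer holds the SMM_DISPATCHER_MP_SYNC_DATA header, followed by one
// SMM_CPU_DATA_BLOCK per CPU, followed by one CandidateBsp flag per CPU
// (see the size computation in InitializeMpServiceData()).
//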
1137 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1138 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1139 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1140 //
1141 // Enable BSP election by setting BspIndex to -1
1142 //
1143 mSmmMpSyncData->BspIndex = (UINT32)-1;
1144 }
1145 mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
1146 }
1147 }
1148
1149 /**
1150 Initialize global data for MP synchronization.
1151
1152 @param Stacks Base address of SMI stack buffer for all processors.
1153 @param StackSize Stack size for each processor in SMM.
1154
1155 **/
1156 UINT32
1157 InitializeMpServiceData (
1158 IN VOID *Stacks,
1159 IN UINTN StackSize
1160 )
1161 {
1162 UINT32 Cr3;
1163 UINTN Index;
1164 MTRR_SETTINGS *Mtrr;
1165 PROCESSOR_SMM_DESCRIPTOR *Psd;
1166 UINT8 *GdtTssTables;
1167 UINTN GdtTableStepSize;
1168
1169 //
1170 // Initialize physical address mask
1171 // NOTE: Physical memory above virtual address limit is not supported !!!
1172 //
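//
// CPUID leaf 0x80000008 returns the physical address width in bits 7:0 of
// EAX; the resulting mask is then limited to 48 bits and aligned to the 4KB
// page frame.
//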
1173 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1174 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1175 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1176
1177 //
1178 // Create page tables
1179 //
1180 Cr3 = SmmInitPageTable ();
1181
1182 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1183
1184 //
1185 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
1186 //
1187 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1188 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
1189 CopyMem (Psd, &gcPsd, sizeof (gcPsd));
1190 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
1191 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
1192
1193 //
1194 // Install SMI handler
1195 //
1196 InstallSmiHandler (
1197 Index,
1198 (UINT32)mCpuHotPlugData.SmBase[Index],
1199 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1200 StackSize,
1201 (UINTN)Psd->SmmGdtPtr,
1202 Psd->SmmGdtSize,
1203 gcSmiIdtr.Base,
1204 gcSmiIdtr.Limit + 1,
1205 Cr3
1206 );
1207 }
1208
1209 //
1210 // Initialize mSmmMpSyncData
1211 //
1212 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1213 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1214 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1215 ASSERT (mSmmMpSyncData != NULL);
1216 InitializeMpSyncData ();
1217
1218 //
1219 // Record current MTRR settings
1220 //
1221 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
1222 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
1223 MtrrGetAllMtrrs (Mtrr);
1224
1225 return Cr3;
1226 }
1227
1228 /**
1229
1230 Register the SMM Foundation entry point.
1231
1232 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1233 @param SmmEntryPoint SMM Foundation EntryPoint
1234
1235 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
1236
1237 **/
1238 EFI_STATUS
1239 EFIAPI
1240 RegisterSmmEntry (
1241 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1242 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1243 )
1244 {
1245 //
1246 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1247 //
1248 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1249 return EFI_SUCCESS;
1250 }