/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRRs + VARIABLE MTRRs + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                       gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

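  //
  // Spin until the semaphore is non-zero, then decrement it atomically.
  // The compare-exchange retries whenever another processor modified *Sem
  // between the read and the exchange.
  //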
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

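  //
  // A semaphore value of (UINT32)-1 means it has been locked down by
  // LockdownSemaphore(); in that case Value + 1 == 0 and the release is
  // silently dropped instead of wrapping the counter.
  //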
  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

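  //
  // Setting the semaphore to (UINT32)-1 prevents any further increments by
  // ReleaseSemaphore(), so the value read here is a stable snapshot.
  //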
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

/**
  Waits for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
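  //
  // The BSP's own Run semaphore doubles as an arrival counter: each AP posts
  // to it once, and the BSP consumes one count per expected AP.
  //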
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
      ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions     CPU Arrival exception flags.

  @retval TRUE  if all CPUs have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
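  //
  // Walk every possible CPU slot; a slot whose ProcessorId is INVALID_APIC_ID
  // is unpopulated (for example after hot-remove) and is not expected to
  // check in.
  //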
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}


/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this
  function returns, no AP will execute normal mode code before entering SMM,
  except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64  Timer;
  UINTN   Index;

  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
  //    enter SMM immediately without executing instructions in normal mode. Note the traditional flow requires that no APs are doing normal mode
  //    work while SMI handling is ongoing.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is ongoing.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex  Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT64                    *SmiMtrrs;
  MTRR_SETTINGS             *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs (BiosMtrr);
}

/**
  SMI handler for BSP.

  @param CpuIndex  BSP processor Index
  @param SyncMode  SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or if MTRRs need to be configured: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRRs to avoid a race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
      AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
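    // (PresentCount below also counts the BSP, so the loop exits once it
    // exceeds ApCount, i.e. once every counted AP has flagged its presence)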
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (mSmmMpSyncData->CpuData[Index].Present) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to reset states/semaphores for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of the BSP
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning the BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  mSmmMpSyncData->Counter = 0;
  mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param CpuIndex  AP processor Index.
  @param ValidSmi  Indicates whether the current SMI is a valid SMI.
  @param SyncMode  SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;

  //
  // Wait for the BSP to flag that it is inside SMM, with a timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !mSmmMpSyncData->InsideSmm;
       ) {
    CpuPause ();
  }

  if (!mSmmMpSyncData->InsideSmm) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP with a 2nd timeout
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !mSmmMpSyncData->InsideSmm;
           ) {
        CpuPause ();
      }

      if (!mSmmMpSyncData->InsideSmm) {
        //
        // Give up since BSP is unable to enter SMM,
        // and signal the completion of this AP
        //
        WaitForSemaphore (&mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (&mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to back up MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!mSmmMpSyncData->InsideSmm) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to reset states/semaphores for this processor
  //
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to reset states/semaphores for this processor
  //
  WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphores for this processor
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create a 4G page table in SMRAM.

  @param ExtraPages        Additional pages to allocate besides the ones needed for the 4G memory map
  @param Is32BitPageTable  Whether the page table is 32-bit PAE
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      UINTN                     ExtraPages,
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
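  // (4 page directories x 512 entries each; entry Index maps the 2MB page at
  // linear address Index << 21 with the PS bit set, covering 0 - 4GB)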
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
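          // (IA32_PG_P is deliberately left clear so that any access faults)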
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Set memory cacheability.

  @param PageTable     PageTable Address
  @param Address       Memory Address to change cacheability
  @param Cacheability  Cacheability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

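  //
  // On X64 builds there is an extra top paging level (PML4) to walk first;
  // IA32 PAE paging starts directly at the page directory pointer level.
  //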
  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability against the
  // one being set, and break a 2M page entry into pieces only when they
  // disagree.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

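    //
    // Split the 2MB page into 512 4KB entries. Note the PAT bit is bit 12
    // in a 2MB page directory entry but bit 7 in a 4KB page table entry,
    // hence the translation below.
    //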
    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)(IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}


/**
  Schedule a procedure to run on the specified CPU.

  @param Procedure      The address of the procedure to run
  @param CpuIndex       Target CPU Index
  @param ProcArguments  The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER  CpuIndex is not valid
  @retval EFI_INVALID_PARAMETER  CpuIndex specifies the BSP
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER  The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS            The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
      CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
      !mSmmMpSyncData->CpuData[CpuIndex].Present ||
      gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
    return EFI_INVALID_PARAMETER;
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
    AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}
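
//
// Illustrative call sequence from BSP-side SMI handler code (hypothetical
// procedure and context names, shown only as a usage sketch):
//
//   Status = SmmStartupThisAp (CollectApMsrs, ApIndex, &MsrBuffer);
//   if (!EFI_ERROR (Status) && !FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
//     //
//     // Non-blocking mode: take and release the AP's Busy lock to wait for
//     // the scheduled procedure to finish before consuming its results.
//     //
//     AcquireSpinLock (&mSmmMpSyncData->CpuData[ApIndex].Busy);
//     ReleaseSpinLock (&mSmmMpSyncData->CpuData[ApIndex].Busy);
//   }
//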

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering
  SMM mode first.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param CpuIndex  CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;
  BOOLEAN     XdDisableFlag;

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP has already been in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled the ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after the AP's Present flag is detected.
      //
      InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    //
    // Try to enable XD
    //
    XdDisableFlag = FALSE;
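    //
    // If the XD Disable bit is set in IA32_MISC_ENABLE, the CPU hides its
    // XD/NX capability, so clear the bit first, remember that it was set,
    // and restore it on the way out of this SMI.
    //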
    if (mXdSupported) {
      if ((AsmReadMsr64 (MSR_IA32_MISC_ENABLE) & B_XD_DISABLE_BIT) != 0) {
        XdDisableFlag = TRUE;
        AsmMsrAnd64 (MSR_IA32_MISC_ENABLE, ~B_XD_DISABLE_BIT);
      }
      ActivateXd ();
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook made the determination successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine; fall back to the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
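            //
            // (first come, first served: the first CPU to swap BspIndex from
            // -1 to its own index becomes the BSP)
            //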
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSPHandler is always called with ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }

    //
    // Restore XD
    //
    if (XdDisableFlag) {
      AsmMsrOr64 (MSR_IA32_MISC_ENABLE, B_XD_DISABLE_BIT);
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}


/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  if (mSmmMpSyncData != NULL) {
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
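    //
    // The buffer laid out by InitializeMpServiceData() holds the fixed-size
    // sync structure, then one SMM_CPU_DATA_BLOCK per CPU, then one
    // CandidateBsp flag per CPU; carve out the trailing arrays here.
    //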
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
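  // CPUID leaf 0x80000008 returns the physical address width in EAX[7:0];
  // the mask below then keeps only bits 12 through 47 (page-aligned
  // addresses up to the 48-bit limit).
  //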
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  InitializeMpSyncData ();

  //
  // Record current MTRR settings
  //
  ZeroMem (gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}

/**
  Register the SMM Foundation entry point.

  @param This           Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint  SMM Foundation EntryPoint

  @retval EFI_SUCCESS   The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}