1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
19 //
20 UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
21 UINT64 gPhyMask;
22 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
23 UINTN mSmmMpSyncDataSize;
24 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
25 UINTN mSemaphoreSize;
26 SPIN_LOCK *mPFLock = NULL;
27 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
28
29 /**
30 Performs an atomic compare exchange operation to get a semaphore.
31 The compare exchange operation must be performed using
32 MP safe mechanisms.
33
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
37
38 **/
39 UINT32
40 WaitForSemaphore (
41 IN OUT volatile UINT32 *Sem
42 )
43 {
44 UINT32 Value;
45
46 do {
47 Value = *Sem;
48 } while (Value == 0 ||
49 InterlockedCompareExchange32 (
50 (UINT32*)Sem,
51 Value,
52 Value - 1
53 ) != Value);
54 return Value - 1;
55 }
56
57
58 /**
59 Performs an atomic compare exchange operation to release a semaphore.
60 The compare exchange operation must be performed using
61 MP safe mechanisms.
62
63 @param Sem IN: 32-bit unsigned integer
64 OUT: original integer + 1
65 @return Original integer + 1
66
67 **/
68 UINT32
69 ReleaseSemaphore (
70 IN OUT volatile UINT32 *Sem
71 )
72 {
73 UINT32 Value;
74
75 do {
76 Value = *Sem;
77 } while (Value + 1 != 0 &&
78 InterlockedCompareExchange32 (
79 (UINT32*)Sem,
80 Value,
81 Value + 1
82 ) != Value);
83 return Value + 1;
84 }
85
86 /**
87 Performs an atomic compare exchange operation to lock a semaphore.
88 The compare exchange operation must be performed using
89 MP safe mechanisms.
90
91 @param Sem IN: 32-bit unsigned integer
92 OUT: -1
93 @return Original integer
94
95 **/
96 UINT32
97 LockdownSemaphore (
98 IN OUT volatile UINT32 *Sem
99 )
100 {
101 UINT32 Value;
102
103 do {
104 Value = *Sem;
105 } while (InterlockedCompareExchange32 (
106 (UINT32*)Sem,
107 Value, (UINT32)-1
108 ) != Value);
109 return Value;
110 }
111
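//
// Illustrative sketch (not invoked by this driver): the three primitives above
// implement a counting semaphore on top of InterlockedCompareExchange32(), and
// they are paired between the BSP and the APs later in BSPHandler()/APHandler():
//
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);   // AP: post one unit to the BSP
//   WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);   // BSP: spin until a unit arrives, then consume it
//   LockdownSemaphore (mSmmMpSyncData->Counter);                // BSP: freeze the arrival counter at (UINT32)-1, so a
//                                                               // late ReleaseSemaphore() on it returns 0 and the late
//                                                               // CPU knows synchronization has already closed
//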
112 /**
113 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
114
115 @param NumberOfAPs AP number
116
117 **/
118 VOID
119 WaitForAllAPs (
120 IN UINTN NumberOfAPs
121 )
122 {
123 UINTN BspIndex;
124
125 BspIndex = mSmmMpSyncData->BspIndex;
126 while (NumberOfAPs-- > 0) {
127 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
128 }
129 }
130
131 /**
132 Performs an atomic compare exchange operation to release the semaphore
133 for each AP.
134
135 **/
136 VOID
137 ReleaseAllAPs (
138 VOID
139 )
140 {
141 UINTN Index;
142 UINTN BspIndex;
143
144 BspIndex = mSmmMpSyncData->BspIndex;
145 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
146 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
147 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
148 }
149 }
150 }
151
152 /**
153 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
154
155 @param Exceptions CPU Arrival exception flags.
156
157 @retval TRUE if all CPUs have checked in.
158 @retval FALSE if at least one Normal AP hasn't checked in.
159
160 **/
161 BOOLEAN
162 AllCpusInSmmWithExceptions (
163 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
164 )
165 {
166 UINTN Index;
167 SMM_CPU_DATA_BLOCK *CpuData;
168 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
169
170 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
171
172 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
173 return TRUE;
174 }
175
176 CpuData = mSmmMpSyncData->CpuData;
177 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
178 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
179 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
180 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
181 continue;
182 }
183 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
184 continue;
185 }
186 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
187 continue;
188 }
189 return FALSE;
190 }
191 }
192
193
194 return TRUE;
195 }
196
197
198 /**
199 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
200 entering SMM, except for SMI-disabled APs.
201
202 **/
203 VOID
204 SmmWaitForApArrival (
205 VOID
206 )
207 {
208 UINT64 Timer;
209 UINTN Index;
210
211 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
212
213 //
214 // Platform implementor should choose a timeout value appropriately:
215 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
216 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
217 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
218 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
219 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
220 // SMI IPI), because with a buffered SMI, the CPU will enter SMM immediately after it is brought out of the blocked state.
221 // - The timeout value must be longer than the longest possible IO operation in the system
222 //
223
224 //
225 // Sync with APs 1st timeout
226 //
227 for (Timer = StartSyncTimer ();
228 !IsSyncTimerTimeout (Timer) &&
229 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
230 ) {
231 CpuPause ();
232 }
233
234 //
235 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
236 // because:
237 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
238 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
239 // enter SMM immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
240 // work while SMI handling is ongoing.
241 // b) As a consequence of SMI IPI sending, a (spurious) SMI may occur after this SMM run.
242 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
243 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
244 // mode work while SMI handling is ongoing.
245 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
246 // - In traditional flow, SMI disabling is discouraged.
247 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
248 // In both cases, adding SMI-disabling checking code increases overhead.
249 //
250 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
251 //
252 // Send SMI IPIs to bring outside processors in
253 //
254 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
255 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
256 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
257 }
258 }
259
260 //
261 // Sync with APs 2nd timeout.
262 //
263 for (Timer = StartSyncTimer ();
264 !IsSyncTimerTimeout (Timer) &&
265 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
266 ) {
267 CpuPause ();
268 }
269 }
270
271 return;
272 }
273
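//
// Illustrative sketch (hypothetical handler code, not part of this driver):
// because the arrival loops above may time out, SMI handlers must tolerate APs
// that never checked in. A handler fanning work out to other processors can
// simply skip any index that is rejected; SomeProcedure below is a hypothetical
// EFI_AP_PROCEDURE.
//
//   for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
//     Status = SmmBlockingStartupThisAp (SomeProcedure, Index, NULL);
//     if (EFI_ERROR (Status)) {
//       continue;   // Index is the BSP, or the AP is absent/busy for this SMI run
//     }
//   }
//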
274
275 /**
276 Replace OS MTRRs with SMI MTRRs.
277
278 @param CpuIndex Processor Index
279
280 **/
281 VOID
282 ReplaceOSMtrrs (
283 IN UINTN CpuIndex
284 )
285 {
286 PROCESSOR_SMM_DESCRIPTOR *Psd;
287 UINT64 *SmiMtrrs;
288 MTRR_SETTINGS *BiosMtrr;
289
290 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
291 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
292
293 SmmCpuFeaturesDisableSmrr ();
294
295 //
296 // Replace all MTRR registers
297 //
298 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
299 MtrrSetAllMtrrs(BiosMtrr);
300 }
301
302 /**
303 SMI handler for BSP.
304
305 @param CpuIndex BSP processor Index
306 @param SyncMode SMM MP sync mode
307
308 **/
309 VOID
310 BSPHandler (
311 IN UINTN CpuIndex,
312 IN SMM_CPU_SYNC_MODE SyncMode
313 )
314 {
315 UINTN Index;
316 MTRR_SETTINGS Mtrrs;
317 UINTN ApCount;
318 BOOLEAN ClearTopLevelSmiResult;
319 UINTN PresentCount;
320
321 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
322 ApCount = 0;
323
324 //
325 // Flag BSP's presence
326 //
327 *mSmmMpSyncData->InsideSmm = TRUE;
328
329 //
330 // Initialize Debug Agent to start source level debug in BSP handler
331 //
332 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
333
334 //
335 // Mark this processor's presence
336 //
337 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
338
339 //
340 // Clear platform top level SMI status bit before calling SMI handlers. If
341 // we cleared it after SMI handlers are run, we would miss the SMI that
342 // occurs after SMI handlers are done and before SMI status bit is cleared.
343 //
344 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
345 ASSERT (ClearTopLevelSmiResult == TRUE);
346
347 //
348 // Set running processor index
349 //
350 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
351
352 //
353 // If in Traditional Sync Mode, or MTRRs need to be configured: gather all available APs.
354 //
355 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
356
357 //
358 // Wait for APs to arrive
359 //
360 SmmWaitForApArrival();
361
362 //
363 // Lock the counter down and retrieve the number of APs
364 //
365 *mSmmMpSyncData->AllCpusInSync = TRUE;
366 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
367
368 //
369 // Wait for all APs to get ready for programming MTRRs
370 //
371 WaitForAllAPs (ApCount);
372
373 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
374 //
375 // Signal all APs that it's time to back up MTRRs
376 //
377 ReleaseAllAPs ();
378
379 //
380 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
381 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
382 // to a large enough value to avoid this situation.
383 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
384 // We do the backup first and then set MTRR to avoid race condition for threads
385 // in the same core.
386 //
387 MtrrGetAllMtrrs(&Mtrrs);
388
389 //
390 // Wait for all APs to complete their MTRR saving
391 //
392 WaitForAllAPs (ApCount);
393
394 //
395 // Let all processors program SMM MTRRs together
396 //
397 ReleaseAllAPs ();
398
399 //
400 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
401 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
402 // to a large enough value to avoid this situation.
403 //
404 ReplaceOSMtrrs (CpuIndex);
405
406 //
407 // Wait for all APs to complete their MTRR programming
408 //
409 WaitForAllAPs (ApCount);
410 }
411 }
412
413 //
414 // The BUSY lock is initialized to Acquired state
415 //
416 AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);
417
418 //
419 // Perform the pre tasks
420 //
421 PerformPreTasks ();
422
423 //
424 // Invoke SMM Foundation EntryPoint with the processor information context.
425 //
426 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
427
428 //
429 // Make sure all APs have completed their pending non-blocking tasks
430 //
431 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
432 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
433 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
434 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
435 }
436 }
437
438 //
439 // Perform the remaining tasks
440 //
441 PerformRemainingTasks ();
442
443 //
444 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
445 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
446 // will run through freely.
447 //
448 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
449
450 //
451 // Lock the counter down and retrieve the number of APs
452 //
453 *mSmmMpSyncData->AllCpusInSync = TRUE;
454 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
455 //
456 // Make sure all APs have their Present flag set
457 //
458 while (TRUE) {
459 PresentCount = 0;
460 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
461 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
462 PresentCount ++;
463 }
464 }
465 if (PresentCount > ApCount) {
466 break;
467 }
468 }
469 }
470
471 //
472 // Notify all APs to exit
473 //
474 *mSmmMpSyncData->InsideSmm = FALSE;
475 ReleaseAllAPs ();
476
477 //
478 // Wait for all APs to complete their pending tasks
479 //
480 WaitForAllAPs (ApCount);
481
482 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
483 //
484 // Signal APs to restore MTRRs
485 //
486 ReleaseAllAPs ();
487
488 //
489 // Restore OS MTRRs
490 //
491 SmmCpuFeaturesReenableSmrr ();
492 MtrrSetAllMtrrs(&Mtrrs);
493
494 //
495 // Wait for all APs to complete MTRR programming
496 //
497 WaitForAllAPs (ApCount);
498 }
499
500 //
501 // Stop source level debug in BSP handler, the code below will not be
502 // debugged.
503 //
504 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
505
506 //
507 // Signal APs to reset their per-processor states/semaphores
508 //
509 ReleaseAllAPs ();
510
511 //
512 // Perform pending operations for hot-plug
513 //
514 SmmCpuUpdate ();
515
516 //
517 // Clear the Present flag of BSP
518 //
519 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
520
521 //
522 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
523 // WaitForAllAPs() does not depend on the Present flag.
524 //
525 WaitForAllAPs (ApCount);
526
527 //
528 // Reset BspIndex to -1, meaning BSP has not been elected.
529 //
530 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
531 mSmmMpSyncData->BspIndex = (UINT32)-1;
532 }
533
534 //
535 // Allow APs to check in from this point on
536 //
537 *mSmmMpSyncData->Counter = 0;
538 *mSmmMpSyncData->AllCpusInSync = FALSE;
539 }
540
541 /**
542 SMI handler for AP.
543
544 @param CpuIndex AP processor Index.
545 @param ValidSmi Indicates whether the current SMI is a valid SMI.
546 @param SyncMode SMM MP sync mode.
547
548 **/
549 VOID
550 APHandler (
551 IN UINTN CpuIndex,
552 IN BOOLEAN ValidSmi,
553 IN SMM_CPU_SYNC_MODE SyncMode
554 )
555 {
556 UINT64 Timer;
557 UINTN BspIndex;
558 MTRR_SETTINGS Mtrrs;
559
560 //
561 // Timeout BSP
562 //
563 for (Timer = StartSyncTimer ();
564 !IsSyncTimerTimeout (Timer) &&
565 !(*mSmmMpSyncData->InsideSmm);
566 ) {
567 CpuPause ();
568 }
569
570 if (!(*mSmmMpSyncData->InsideSmm)) {
571 //
572 // BSP timeout in the first round
573 //
574 if (mSmmMpSyncData->BspIndex != -1) {
575 //
576 // BSP Index is known
577 //
578 BspIndex = mSmmMpSyncData->BspIndex;
579 ASSERT (CpuIndex != BspIndex);
580
581 //
582 // Send SMI IPI to bring BSP in
583 //
584 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
585
586 //
587 // Now wait for the BSP a 2nd time
588 //
589 for (Timer = StartSyncTimer ();
590 !IsSyncTimerTimeout (Timer) &&
591 !(*mSmmMpSyncData->InsideSmm);
592 ) {
593 CpuPause ();
594 }
595
596 if (!(*mSmmMpSyncData->InsideSmm)) {
597 //
598 // Give up since BSP is unable to enter SMM
599 // and signal the completion of this AP
600 WaitForSemaphore (mSmmMpSyncData->Counter);
601 return;
602 }
603 } else {
604 //
605 // Don't know BSP index. Give up without sending IPI to BSP.
606 //
607 WaitForSemaphore (mSmmMpSyncData->Counter);
608 return;
609 }
610 }
611
612 //
613 // BSP is available
614 //
615 BspIndex = mSmmMpSyncData->BspIndex;
616 ASSERT (CpuIndex != BspIndex);
617
618 //
619 // Mark this processor's presence
620 //
621 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
622
623 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
624 //
625 // Notify BSP of arrival at this point
626 //
627 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
628 }
629
630 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
631 //
632 // Wait for the signal from BSP to backup MTRRs
633 //
634 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
635
636 //
637 // Backup OS MTRRs
638 //
639 MtrrGetAllMtrrs(&Mtrrs);
640
641 //
642 // Signal BSP the completion of this AP
643 //
644 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
645
646 //
647 // Wait for BSP's signal to program MTRRs
648 //
649 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
650
651 //
652 // Replace OS MTRRs with SMI MTRRs
653 //
654 ReplaceOSMtrrs (CpuIndex);
655
656 //
657 // Signal BSP the completion of this AP
658 //
659 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
660 }
661
662 while (TRUE) {
663 //
664 // Wait for something to happen
665 //
666 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
667
668 //
669 // Check if BSP wants to exit SMM
670 //
671 if (!(*mSmmMpSyncData->InsideSmm)) {
672 break;
673 }
674
675 //
676 // BUSY should be acquired by SmmStartupThisAp()
677 //
678 ASSERT (
679 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
680 );
681
682 //
683 // Invoke the scheduled procedure
684 //
685 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
686 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
687 );
688
689 //
690 // Release BUSY
691 //
692 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
693 }
694
695 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
696 //
697 // Notify BSP that this AP is ready to program MTRRs
698 //
699 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
700
701 //
702 // Wait for the signal from BSP to program MTRRs
703 //
704 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
705
706 //
707 // Restore OS MTRRs
708 //
709 SmmCpuFeaturesReenableSmrr ();
710 MtrrSetAllMtrrs(&Mtrrs);
711 }
712
713 //
714 // Notify BSP that this AP is ready to reset the states/semaphores for this processor
715 //
716 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
717
718 //
719 // Wait for the signal from BSP to reset the states/semaphores for this processor
720 //
721 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
722
723 //
724 // Reset the states/semaphores for this processor
725 //
726 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
727
728 //
729 // Notify BSP that this AP is ready to exit SMM
730 //
731 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
732
733 }
734
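//
// Run-semaphore pairing between BSPHandler() and APHandler() when
// SmmCpuFeaturesNeedConfigureMtrrs() returns TRUE (each BSP step on the left
// synchronizes with the AP step on the right; WaitForAllAPs() consumes one Run
// unit per AP):
//
//   BSP                                      each AP
//   WaitForAllAPs()                     <--  ReleaseSemaphore (BSP Run)   // arrival
//   ReleaseAllAPs()                     -->  WaitForSemaphore (own Run)   // "back up MTRRs"
//   MtrrGetAllMtrrs(); WaitForAllAPs()  <--  MtrrGetAllMtrrs(); ReleaseSemaphore (BSP Run)
//   ReleaseAllAPs()                     -->  WaitForSemaphore (own Run)   // "program SMM MTRRs"
//   ReplaceOSMtrrs(); WaitForAllAPs()   <--  ReplaceOSMtrrs(); ReleaseSemaphore (BSP Run)
//
// A similar pairing is used on exit for restoring the OS MTRRs and resetting
// the per-CPU state.
//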
735 /**
736 Create a 4G page table in SMRAM.
737
738 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
739 @return PageTable Address
740
741 **/
742 UINT32
743 Gen4GPageTable (
744 IN BOOLEAN Is32BitPageTable
745 )
746 {
747 VOID *PageTable;
748 UINTN Index;
749 UINT64 *Pte;
750 UINTN PagesNeeded;
751 UINTN Low2MBoundary;
752 UINTN High2MBoundary;
753 UINTN Pages;
754 UINTN GuardPage;
755 UINT64 *Pdpte;
756 UINTN PageIndex;
757 UINTN PageAddress;
758
759 Low2MBoundary = 0;
760 High2MBoundary = 0;
761 PagesNeeded = 0;
762 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
763 //
764 // Add one more page for known good stack, then find the lower 2MB aligned address.
765 //
766 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
767 //
768 // Add two more pages for known good stack and stack guard page,
769 // then find the lower 2MB aligned address.
770 //
771 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
772 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
773 }
774 //
775 // Allocate the page table
776 //
777 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
778 ASSERT (PageTable != NULL);
779
780 PageTable = (VOID *)((UINTN)PageTable);
781 Pte = (UINT64*)PageTable;
782
783 //
784 // Zero out all page table entries first
785 //
786 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
787
788 //
789 // Set Page Directory Pointers
790 //
791 for (Index = 0; Index < 4; Index++) {
792 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
793 }
794 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
795
796 //
797 // Fill in Page Directory Entries
798 //
799 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
800 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
801 }
802
803 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
804 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
805 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
806 Pdpte = (UINT64*)PageTable;
807 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
808 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
809 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
810 //
811 // Fill in Page Table Entries
812 //
813 Pte = (UINT64*)Pages;
814 PageAddress = PageIndex;
815 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
816 if (PageAddress == GuardPage) {
817 //
818 // Mark the guard page as non-present
819 //
820 Pte[Index] = PageAddress;
821 GuardPage += mSmmStackSize;
822 if (GuardPage > mSmmStackArrayEnd) {
823 GuardPage = 0;
824 }
825 } else {
826 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
827 }
828 PageAddress+= EFI_PAGE_SIZE;
829 }
830 Pages += EFI_PAGE_SIZE;
831 }
832 }
833
834 return (UINT32)(UINTN)PageTable;
835 }
836
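//
// Worked example (illustrative value) for the stack-guard remapping above:
// assume PageIndex == 0x7FE00000, a 2MB-aligned address just below 2GB. Then
//
//   BitFieldRead32 (0x7FE00000, 30, 31) == 1      // selects Pdpte[1], the directory covering 1GB..2GB
//   BitFieldRead32 (0x7FE00000, 21, 29) == 0x1FF  // selects the last 2MB entry in that directory
//
// so that 2MB PS entry is replaced with a pointer to a newly built 4KB page
// table, in which the guard page (initially mSmmStackArrayBase + EFI_PAGE_SIZE)
// is left non-present while every other page keeps PAGE_ATTRIBUTE_BITS.
//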
837 /**
838 Set memory cacheability.
839
840 @param PageTable PageTable Address
841 @param Address Memory Address to change cacheability
842 @param Cacheability Cacheability to set
843
844 **/
845 VOID
846 SetCacheability (
847 IN UINT64 *PageTable,
848 IN UINTN Address,
849 IN UINT8 Cacheability
850 )
851 {
852 UINTN PTIndex;
853 VOID *NewPageTableAddress;
854 UINT64 *NewPageTable;
855 UINTN Index;
856
857 ASSERT ((Address & EFI_PAGE_MASK) == 0);
858
859 if (sizeof (UINTN) == sizeof (UINT64)) {
860 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
861 ASSERT (PageTable[PTIndex] & IA32_PG_P);
862 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
863 }
864
865 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
866 ASSERT (PageTable[PTIndex] & IA32_PG_P);
867 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
868
869 //
870 // A perfect implementation should check the original cacheability against the
871 // one being set, and break a 2M page entry into pieces only when they
872 // disagree.
873 //
874 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
875 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
876 //
877 // Allocate a page from SMRAM
878 //
879 NewPageTableAddress = AllocatePageTableMemory (1);
880 ASSERT (NewPageTableAddress != NULL);
881
882 NewPageTable = (UINT64 *)NewPageTableAddress;
883
884 for (Index = 0; Index < 0x200; Index++) {
885 NewPageTable[Index] = PageTable[PTIndex];
886 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
887 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
888 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
889 }
890 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
891 }
892
893 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
894 }
895
896 ASSERT (PageTable[PTIndex] & IA32_PG_P);
897 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
898
899 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
900 ASSERT (PageTable[PTIndex] & IA32_PG_P);
901 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
902 PageTable[PTIndex] |= (UINT64)Cacheability;
903 }
904
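//
// Worked example (illustrative address) of the table walk above: with 4-level
// paging and Address == 0xFED00000, the successive 9-bit indices are
//
//   (0xFED00000 >> 39) & 0x1FF == 0x000  // PML4 entry
//   (0xFED00000 >> 30) & 0x1FF == 0x003  // PDPT entry
//   (0xFED00000 >> 21) & 0x1FF == 0x1F6  // PD entry; split into 4KB pages here if it is a 2MB PS entry
//   (0xFED00000 >> 12) & 0x1FF == 0x100  // PT entry whose PAT/CD/WT bits are finally rewritten
//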
905 /**
906 Schedule a procedure to run on the specified CPU.
907
908 @param[in] Procedure The address of the procedure to run
909 @param[in] CpuIndex Target CPU Index
910 @param[in, out] ProcArguments The parameter to pass to the procedure
911 @param[in] BlockingMode Startup AP in blocking mode or not
912
913 @retval EFI_INVALID_PARAMETER CpuNumber not valid
914 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
915 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
916 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
917 @retval EFI_SUCCESS The procedure has been successfully scheduled
918
919 **/
920 EFI_STATUS
921 InternalSmmStartupThisAp (
922 IN EFI_AP_PROCEDURE Procedure,
923 IN UINTN CpuIndex,
924 IN OUT VOID *ProcArguments OPTIONAL,
925 IN BOOLEAN BlockingMode
926 )
927 {
928 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
929 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
930 return EFI_INVALID_PARAMETER;
931 }
932 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
933 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
934 return EFI_INVALID_PARAMETER;
935 }
936 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
937 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
938 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
939 }
940 return EFI_INVALID_PARAMETER;
941 }
942 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
943 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
944 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
945 }
946 return EFI_INVALID_PARAMETER;
947 }
948
949 if (BlockingMode) {
950 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
951 } else {
952 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
953 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
954 return EFI_INVALID_PARAMETER;
955 }
956 }
957
958 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
959 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
960 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
961
962 if (BlockingMode) {
963 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
964 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
965 }
966 return EFI_SUCCESS;
967 }
968
969 /**
970 Schedule a procedure to run on the specified CPU in blocking mode.
971
972 @param[in] Procedure The address of the procedure to run
973 @param[in] CpuIndex Target CPU Index
974 @param[in, out] ProcArguments The parameter to pass to the procedure
975
976 @retval EFI_INVALID_PARAMETER CpuNumber not valid
977 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
978 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
979 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
980 @retval EFI_SUCCESS The procedure has been successfully scheduled
981
982 **/
983 EFI_STATUS
984 EFIAPI
985 SmmBlockingStartupThisAp (
986 IN EFI_AP_PROCEDURE Procedure,
987 IN UINTN CpuIndex,
988 IN OUT VOID *ProcArguments OPTIONAL
989 )
990 {
991 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
992 }
993
994 /**
995 Schedule a procedure to run on the specified CPU.
996
997 @param Procedure The address of the procedure to run
998 @param CpuIndex Target CPU Index
999 @param ProcArguments The parameter to pass to the procedure
1000
1001 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1002 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1003 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1004 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1005 @retval EFI_SUCCESS The procedure has been successfully scheduled
1006
1007 **/
1008 EFI_STATUS
1009 EFIAPI
1010 SmmStartupThisAp (
1011 IN EFI_AP_PROCEDURE Procedure,
1012 IN UINTN CpuIndex,
1013 IN OUT VOID *ProcArguments OPTIONAL
1014 )
1015 {
1016 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
1017 }
1018
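//
// Illustrative usage sketch (hypothetical caller, not part of this driver): an
// SMI handler that needs code executed on another logical processor inside SMM
// can schedule it through this service. FlushProcedure and Status are
// hypothetical names.
//
//   VOID
//   EFIAPI
//   FlushProcedure (
//     IN OUT VOID  *Buffer
//     )
//   {
//     AsmWbinvd ();   // runs on the target AP, inside SMM
//   }
//
//   Status = SmmBlockingStartupThisAp (FlushProcedure, CpuIndex, NULL);
//   if (EFI_ERROR (Status)) {
//     //
//     // CpuIndex is out of range, is the BSP, or the AP is absent/busy
//     //
//   }
//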
1019 /**
1020 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1021 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.
1022
1023 NOTE: It might not be appropriate at runtime since it might
1024 conflict with OS debugging facilities. Turn them off in RELEASE builds.
1025
1026 @param CpuIndex CPU Index
1027
1028 **/
1029 VOID
1030 EFIAPI
1031 CpuSmmDebugEntry (
1032 IN UINTN CpuIndex
1033 )
1034 {
1035 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1036
1037 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1038 ASSERT(CpuIndex < mMaxNumberOfCpus);
1039 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1040 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1041 AsmWriteDr6 (CpuSaveState->x86._DR6);
1042 AsmWriteDr7 (CpuSaveState->x86._DR7);
1043 } else {
1044 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1045 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1046 }
1047 }
1048 }
1049
1050 /**
1051 This function restores DR6 & DR7 to SMM save state.
1052
1053 NOTE: It might not be appropriate at runtime since it might
1054 conflict with OS debugging facilities. Turn them off in RELEASE builds.
1055
1056 @param CpuIndex CPU Index
1057
1058 **/
1059 VOID
1060 EFIAPI
1061 CpuSmmDebugExit (
1062 IN UINTN CpuIndex
1063 )
1064 {
1065 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1066
1067 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1068 ASSERT(CpuIndex < mMaxNumberOfCpus);
1069 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1070 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1071 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1072 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1073 } else {
1074 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1075 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1076 }
1077 }
1078 }
1079
1080 /**
1081 C function for SMI entry, each processor comes here upon SMI trigger.
1082
1083 @param CpuIndex CPU Index
1084
1085 **/
1086 VOID
1087 EFIAPI
1088 SmiRendezvous (
1089 IN UINTN CpuIndex
1090 )
1091 {
1092 EFI_STATUS Status;
1093 BOOLEAN ValidSmi;
1094 BOOLEAN IsBsp;
1095 BOOLEAN BspInProgress;
1096 UINTN Index;
1097 UINTN Cr2;
1098
1099 ASSERT(CpuIndex < mMaxNumberOfCpus);
1100
1101 //
1102 // Save Cr2 because Page Fault exception in SMM may override its value
1103 //
1104 Cr2 = AsmReadCr2 ();
1105
1106 //
1107 // Perform CPU specific entry hooks
1108 //
1109 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1110
1111 //
1112 // Determine if this is a valid SMI
1113 //
1114 ValidSmi = PlatformValidSmi();
1115
1116 //
1117 // Determine if the BSP is already in progress. Note this must be checked after
1118 // ValidSmi because BSP may clear a valid SMI source after checking in.
1119 //
1120 BspInProgress = *mSmmMpSyncData->InsideSmm;
1121
1122 if (!BspInProgress && !ValidSmi) {
1123 //
1124 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1125 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1126 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1127 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1128 // is nothing we need to do.
1129 //
1130 goto Exit;
1131 } else {
1132 //
1133 // Signal presence of this processor
1134 //
1135 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1136 //
1137 // BSP has already ended the synchronization, so QUIT!!!
1138 //
1139
1140 //
1141 // Wait for BSP's signal to finish SMI
1142 //
1143 while (*mSmmMpSyncData->AllCpusInSync) {
1144 CpuPause ();
1145 }
1146 goto Exit;
1147 } else {
1148
1149 //
1150 // The BUSY lock is initialized to Released state.
1151 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1152 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1153 // after AP's present flag is detected.
1154 //
1155 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1156 }
1157
1158 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1159 ActivateSmmProfile (CpuIndex);
1160 }
1161
1162 if (BspInProgress) {
1163 //
1164 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1165 // as BSP may have cleared the SMI status
1166 //
1167 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1168 } else {
1169 //
1170 // We have a valid SMI
1171 //
1172
1173 //
1174 // Elect BSP
1175 //
1176 IsBsp = FALSE;
1177 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1178 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1179 //
1180 // Call platform hook to do BSP election
1181 //
1182 Status = PlatformSmmBspElection (&IsBsp);
1183 if (EFI_SUCCESS == Status) {
1184 //
1185 // Platform hook determined the BSP successfully
1186 //
1187 if (IsBsp) {
1188 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1189 }
1190 } else {
1191 //
1192 // Platform hook failed to determine the BSP; use the default BSP election method
1193 //
1194 InterlockedCompareExchange32 (
1195 (UINT32*)&mSmmMpSyncData->BspIndex,
1196 (UINT32)-1,
1197 (UINT32)CpuIndex
1198 );
1199 }
1200 }
1201 }
1202
1203 //
1204 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1205 //
1206 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1207
1208 //
1209 // Clear last request for SwitchBsp.
1210 //
1211 if (mSmmMpSyncData->SwitchBsp) {
1212 mSmmMpSyncData->SwitchBsp = FALSE;
1213 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1214 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1215 }
1216 }
1217
1218 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1219 SmmProfileRecordSmiNum ();
1220 }
1221
1222 //
1223 // BSP Handler is always called with a ValidSmi == TRUE
1224 //
1225 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1226 } else {
1227 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1228 }
1229 }
1230
1231 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1232
1233 //
1234 // Wait for BSP's signal to exit SMI
1235 //
1236 while (*mSmmMpSyncData->AllCpusInSync) {
1237 CpuPause ();
1238 }
1239 }
1240
1241 Exit:
1242 SmmCpuFeaturesRendezvousExit (CpuIndex);
1243 //
1244 // Restore Cr2
1245 //
1246 AsmWriteCr2 (Cr2);
1247 }
1248
1249 /**
1250 Allocate buffer for all semaphores and spin locks.
1251
1252 **/
1253 VOID
1254 InitializeSmmCpuSemaphores (
1255 VOID
1256 )
1257 {
1258 UINTN ProcessorCount;
1259 UINTN TotalSize;
1260 UINTN GlobalSemaphoresSize;
1261 UINTN CpuSemaphoresSize;
1262 UINTN MsrSemaphoreSize;
1263 UINTN SemaphoreSize;
1264 UINTN Pages;
1265 UINTN *SemaphoreBlock;
1266 UINTN SemaphoreAddr;
1267
1268 SemaphoreSize = GetSpinLockProperties ();
1269 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1270 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1271 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1272 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
1273 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
1274 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1275 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1276 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1277 SemaphoreBlock = AllocatePages (Pages);
1278 ASSERT (SemaphoreBlock != NULL);
1279 ZeroMem (SemaphoreBlock, TotalSize);
1280
1281 SemaphoreAddr = (UINTN)SemaphoreBlock;
1282 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1283 SemaphoreAddr += SemaphoreSize;
1284 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1285 SemaphoreAddr += SemaphoreSize;
1286 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1287 SemaphoreAddr += SemaphoreSize;
1288 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1289 SemaphoreAddr += SemaphoreSize;
1290 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1291 = (SPIN_LOCK *)SemaphoreAddr;
1292 SemaphoreAddr += SemaphoreSize;
1293 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
1294 = (SPIN_LOCK *)SemaphoreAddr;
1295
1296 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1297 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1298 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1299 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1300 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1301 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1302
1303 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
1304 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
1305 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
1306 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
1307 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
1308
1309 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1310 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1311 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
1312
1313 mSemaphoreSize = SemaphoreSize;
1314 }
1315
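//
// Layout of the semaphore block allocated above. Each field occupies one
// SemaphoreSize slot as reported by GetSpinLockProperties() (typically a cache
// line), so fields hammered by different processors do not share a line:
//
//   GlobalSemaphoresSize : Counter, InsideSmm, AllCpusInSync, PFLock,
//                          CodeAccessCheckLock, MemoryMappedLock
//   CpuSemaphoresSize    : Busy[0..ProcessorCount-1], Run[0..ProcessorCount-1],
//                          Present[0..ProcessorCount-1]
//   MsrSemaphoreSize     : MSR_SPIN_LOCK_INIT_NUM spin-lock slots (plus any
//                          page-rounding remainder counted by AvailableCounter)
//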
1316 /**
1317 Initialize uncacheable data.
1318
1319 **/
1320 VOID
1321 EFIAPI
1322 InitializeMpSyncData (
1323 VOID
1324 )
1325 {
1326 UINTN CpuIndex;
1327
1328 if (mSmmMpSyncData != NULL) {
1329 //
1330 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1331 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1332 //
1333 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1334 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1335 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1336 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1337 //
1338 // Enable BSP election by setting BspIndex to -1
1339 //
1340 mSmmMpSyncData->BspIndex = (UINT32)-1;
1341 }
1342 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1343
1344 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1345 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1346 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1347 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1348 mSmmMpSyncData->AllCpusInSync != NULL);
1349 *mSmmMpSyncData->Counter = 0;
1350 *mSmmMpSyncData->InsideSmm = FALSE;
1351 *mSmmMpSyncData->AllCpusInSync = FALSE;
1352
1353 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1354 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1355 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1356 mSmmMpSyncData->CpuData[CpuIndex].Run =
1357 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1358 mSmmMpSyncData->CpuData[CpuIndex].Present =
1359 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1360 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1361 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1362 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1363 }
1364 }
1365 }
1366
1367 /**
1368 Initialize global data for MP synchronization.
1369
1370 @param Stacks Base address of SMI stack buffer for all processors.
1371 @param StackSize Stack size for each processor in SMM.
1372
1373 **/
1374 UINT32
1375 InitializeMpServiceData (
1376 IN VOID *Stacks,
1377 IN UINTN StackSize
1378 )
1379 {
1380 UINT32 Cr3;
1381 UINTN Index;
1382 MTRR_SETTINGS *Mtrr;
1383 PROCESSOR_SMM_DESCRIPTOR *Psd;
1384 UINT8 *GdtTssTables;
1385 UINTN GdtTableStepSize;
1386
1387 //
1388 // Allocate memory for all locks and semaphores
1389 //
1390 InitializeSmmCpuSemaphores ();
1391
1392 //
1393 // Initialize mSmmMpSyncData
1394 //
1395 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1396 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1397 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1398 ASSERT (mSmmMpSyncData != NULL);
1399 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1400 InitializeMpSyncData ();
1401
1402 //
1403 // Initialize physical address mask
1404 // NOTE: Physical memory above virtual address limit is not supported !!!
1405 //
1406 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1407 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1408 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
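  //
  // For example (illustrative only), a CPU reporting 36 physical address bits
  // in CPUID leaf 0x80000008 EAX[7:0] yields:
  //   gPhyMask  = LShiftU64 (1, 36) - 1;             // 0x0000000FFFFFFFFF
  //   gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;      // 0x0000000FFFFFF000
  // i.e. gPhyMask keeps only the page-frame bits of a physical address.
  //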
1409
1410 //
1411 // Create page tables
1412 //
1413 Cr3 = SmmInitPageTable ();
1414
1415 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1416
1417 //
1418 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
1419 //
1420 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1421 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
1422 CopyMem (Psd, &gcPsd, sizeof (gcPsd));
1423 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
1424 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
1425
1426 //
1427 // Install SMI handler
1428 //
1429 InstallSmiHandler (
1430 Index,
1431 (UINT32)mCpuHotPlugData.SmBase[Index],
1432 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1433 StackSize,
1434 (UINTN)Psd->SmmGdtPtr,
1435 Psd->SmmGdtSize,
1436 gcSmiIdtr.Base,
1437 gcSmiIdtr.Limit + 1,
1438 Cr3
1439 );
1440 }
1441
1442 //
1443 // Record current MTRR settings
1444 //
1445 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
1446 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
1447 MtrrGetAllMtrrs (Mtrr);
1448
1449 return Cr3;
1450 }
1451
1452 /**
1453
1454 Register the SMM Foundation entry point.
1455
1456 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1457 @param SmmEntryPoint SMM Foundation EntryPoint
1458
1459 @retval EFI_SUCCESS Successfully registered SMM foundation entry point
1460
1461 **/
1462 EFI_STATUS
1463 EFIAPI
1464 RegisterSmmEntry (
1465 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1466 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1467 )
1468 {
1469 //
1470 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1471 //
1472 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1473 return EFI_SUCCESS;
1474 }