1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
19 //
20 UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
21 UINT64 gPhyMask;
22 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
23 UINTN mSmmMpSyncDataSize;
24 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
25 UINTN mSemaphoreSize;
26 SPIN_LOCK *mPFLock = NULL;
27
28 /**
29 Performs an atomic compare exchange operation to get semaphore.
30 The compare exchange operation must be performed using
31 MP safe mechanisms.
32
33 @param Sem IN: 32-bit unsigned integer
34 OUT: original integer - 1
35 @return Original integer - 1
36
37 **/
38 UINT32
39 WaitForSemaphore (
40 IN OUT volatile UINT32 *Sem
41 )
42 {
43 UINT32 Value;
44
45 do {
46 Value = *Sem;
47 } while (Value == 0 ||
48 InterlockedCompareExchange32 (
49 (UINT32*)Sem,
50 Value,
51 Value - 1
52 ) != Value);
53 return Value - 1;
54 }
55
56
57 /**
58 Performs an atomic compare exchange operation to release semaphore.
59 The compare exchange operation must be performed using
60 MP safe mechanisms.
61
62 @param Sem IN: 32-bit unsigned integer
63 OUT: original integer + 1
64 @return Original integer + 1
65
66 **/
67 UINT32
68 ReleaseSemaphore (
69 IN OUT volatile UINT32 *Sem
70 )
71 {
72 UINT32 Value;
73
74 do {
75 Value = *Sem;
76 } while (Value + 1 != 0 &&
77 InterlockedCompareExchange32 (
78 (UINT32*)Sem,
79 Value,
80 Value + 1
81 ) != Value);
82 return Value + 1;
83 }
84
85 /**
86 Performs an atomic compare exchange operation to lock semaphore.
87 The compare exchange operation must be performed using
88 MP safe mechanisms.
89
90 @param Sem IN: 32-bit unsigned integer
91 OUT: -1
92 @return Original integer
93
94 **/
95 UINT32
96 LockdownSemaphore (
97 IN OUT volatile UINT32 *Sem
98 )
99 {
100 UINT32 Value;
101
102 do {
103 Value = *Sem;
104 } while (InterlockedCompareExchange32 (
105 (UINT32*)Sem,
106 Value, (UINT32)-1
107 ) != Value);
108 return Value;
109 }
110
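//
// Usage note: together, these three primitives implement the SMI rendezvous
// bookkeeping used below.  mSmmMpSyncData->Counter counts the CPUs that have
// checked in (each arriving CPU increments it with ReleaseSemaphore()),
// LockdownSemaphore() freezes it at -1 so that late arrivals see a zero result
// from ReleaseSemaphore() and quit, and each CpuData[Index].Run value acts as a
// per-CPU doorbell between the BSP and that AP.
//
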
111 /**
112 Waits for all APs to perform an atomic compare exchange operation to release the semaphore.
113
114 @param NumberOfAPs Number of APs to wait for
115
116 **/
117 VOID
118 WaitForAllAPs (
119 IN UINTN NumberOfAPs
120 )
121 {
122 UINTN BspIndex;
123
124 BspIndex = mSmmMpSyncData->BspIndex;
125 while (NumberOfAPs-- > 0) {
126 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
127 }
128 }
129
130 /**
131 Performs an atomic compare exchange operation to release semaphore
132 for each AP.
133
134 **/
135 VOID
136 ReleaseAllAPs (
137 VOID
138 )
139 {
140 UINTN Index;
141 UINTN BspIndex;
142
143 BspIndex = mSmmMpSyncData->BspIndex;
144 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
145 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
146 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
147 }
148 }
149 }
150
151 /**
152 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
153
154 @param Exceptions CPU Arrival exception flags.
155
156 @retval TRUE if all CPUs have checked in.
157 @retval FALSE if at least one Normal AP hasn't checked in.
158
159 **/
160 BOOLEAN
161 AllCpusInSmmWithExceptions (
162 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
163 )
164 {
165 UINTN Index;
166 SMM_CPU_DATA_BLOCK *CpuData;
167 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
168
169 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
170
171 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
172 return TRUE;
173 }
174
175 CpuData = mSmmMpSyncData->CpuData;
176 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
177 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
178 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
179 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
180 continue;
181 }
182 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
183 continue;
184 }
185 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
186 continue;
187 }
188 return FALSE;
189 }
190 }
191
192
193 return TRUE;
194 }
195
196
197 /**
198 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
199 entering SMM, except for SMI-disabled APs.
200
201 **/
202 VOID
203 SmmWaitForApArrival (
204 VOID
205 )
206 {
207 UINT64 Timer;
208 UINTN Index;
209
210 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
211
212 //
213 // The platform implementor should choose a timeout value appropriately:
214 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
215 // the SMI Handlers must ALWAYS take into account the case that not all APs are available in an SMI run.
216 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
217 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This holds
218 // even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
219 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
220 // - The timeout value must be longer than the longest possible IO operation in the system.
221 //
222
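//
// (In this driver the interval polled via StartSyncTimer()/IsSyncTimerTimeout() is
// expected to be derived from the PcdCpuSmmApSyncTimeout platform PCD; the guidance
// above is about choosing that PCD's value.)
//
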
223 //
224 // Sync with APs 1st timeout
225 //
226 for (Timer = StartSyncTimer ();
227 !IsSyncTimerTimeout (Timer) &&
228 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
229 ) {
230 CpuPause ();
231 }
232
233 //
234 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
235 // because:
236 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
237 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
238 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode
239 // work while SMI handling is ongoing.
240 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
241 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
242 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
243 // mode work while SMI handling is on-going.
244 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
245 // - In traditional flow, SMI disabling is discouraged.
246 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
247 // In both cases, adding SMI-disabling checking code increases overhead.
248 //
249 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
250 //
251 // Send SMI IPIs to bring outside processors in
252 //
253 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
254 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
255 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
256 }
257 }
258
259 //
260 // Sync with APs 2nd timeout.
261 //
262 for (Timer = StartSyncTimer ();
263 !IsSyncTimerTimeout (Timer) &&
264 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
265 ) {
266 CpuPause ();
267 }
268 }
269
270 return;
271 }
272
273
274 /**
275 Replace OS MTRRs with SMI MTRRs.
276
277 @param CpuIndex Processor Index
278
279 **/
280 VOID
281 ReplaceOSMtrrs (
282 IN UINTN CpuIndex
283 )
284 {
285 PROCESSOR_SMM_DESCRIPTOR *Psd;
286 UINT64 *SmiMtrrs;
287 MTRR_SETTINGS *BiosMtrr;
288
289 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
290 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
291
292 SmmCpuFeaturesDisableSmrr ();
293
294 //
295 // Replace all MTRR registers
296 //
297 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
298 MtrrSetAllMtrrs(BiosMtrr);
299 }
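
//
// Note: the SMI MTRR block referenced through Psd->MtrrBaseMaskPtr is expected to be
// the gSmiMtrrs snapshot captured by InitializeMpServiceData() via MtrrGetAllMtrrs(),
// i.e. a complete MTRR_SETTINGS image, which is what makes the cast above valid.
//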
300
301 /**
302 SMI handler for BSP.
303
304 @param CpuIndex BSP processor Index
305 @param SyncMode SMM MP sync mode
306
307 **/
308 VOID
309 BSPHandler (
310 IN UINTN CpuIndex,
311 IN SMM_CPU_SYNC_MODE SyncMode
312 )
313 {
314 UINTN Index;
315 MTRR_SETTINGS Mtrrs;
316 UINTN ApCount;
317 BOOLEAN ClearTopLevelSmiResult;
318 UINTN PresentCount;
319
320 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
321 ApCount = 0;
322
323 //
324 // Flag BSP's presence
325 //
326 *mSmmMpSyncData->InsideSmm = TRUE;
327
328 //
329 // Initialize Debug Agent to start source level debug in BSP handler
330 //
331 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
332
333 //
334 // Mark this processor's presence
335 //
336 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
337
338 //
339 // Clear platform top level SMI status bit before calling SMI handlers. If
340 // we cleared it after SMI handlers are run, we would miss the SMI that
341 // occurs after SMI handlers are done and before SMI status bit is cleared.
342 //
343 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
344 ASSERT (ClearTopLevelSmiResult == TRUE);
345
346 //
347 // Set running processor index
348 //
349 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
350
351 //
352 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
353 //
354 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
355
356 //
357 // Wait for APs to arrive
358 //
359 SmmWaitForApArrival();
360
361 //
362 // Lock the counter down and retrieve the number of APs
363 //
364 *mSmmMpSyncData->AllCpusInSync = TRUE;
365 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
366
367 //
368 // Wait for all APs to get ready for programming MTRRs
369 //
370 WaitForAllAPs (ApCount);
371
372 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
373 //
374 // Signal all APs it's time for backup MTRRs
375 //
376 ReleaseAllAPs ();
377
378 //
379 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
380 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
381 // to a large enough value to avoid this situation.
382 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
383 // We do the backup first and then set MTRR to avoid race condition for threads
384 // in the same core.
385 //
386 MtrrGetAllMtrrs(&Mtrrs);
387
388 //
389 // Wait for all APs to complete their MTRR saving
390 //
391 WaitForAllAPs (ApCount);
392
393 //
394 // Let all processors program SMM MTRRs together
395 //
396 ReleaseAllAPs ();
397
398 //
399 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
400 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
401 // to a large enough value to avoid this situation.
402 //
403 ReplaceOSMtrrs (CpuIndex);
404
405 //
406 // Wait for all APs to complete their MTRR programming
407 //
408 WaitForAllAPs (ApCount);
409 }
410 }
411
412 //
413 // The BUSY lock is initialized to Acquired state
414 //
415 AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);
416
417 //
418 // Perform the pre tasks
419 //
420 PerformPreTasks ();
421
422 //
423 // Invoke SMM Foundation EntryPoint with the processor information context.
424 //
425 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
426
427 //
428 // Make sure all APs have completed their pending non-blocking tasks
429 //
430 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
431 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
432 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
433 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
434 }
435 }
436
437 //
438 // Perform the remaining tasks
439 //
440 PerformRemainingTasks ();
441
442 //
443 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
444 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
445 // will run through freely.
446 //
447 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
448
449 //
450 // Lock the counter down and retrieve the number of APs
451 //
452 *mSmmMpSyncData->AllCpusInSync = TRUE;
453 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
454 //
455 // Make sure all APs have their Present flag set
456 //
457 while (TRUE) {
458 PresentCount = 0;
459 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
460 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
461 PresentCount ++;
462 }
463 }
464 if (PresentCount > ApCount) {
465 break;
466 }
467 }
468 }
469
470 //
471 // Notify all APs to exit
472 //
473 *mSmmMpSyncData->InsideSmm = FALSE;
474 ReleaseAllAPs ();
475
476 //
477 // Wait for all APs to complete their pending tasks
478 //
479 WaitForAllAPs (ApCount);
480
481 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
482 //
483 // Signal APs to restore MTRRs
484 //
485 ReleaseAllAPs ();
486
487 //
488 // Restore OS MTRRs
489 //
490 SmmCpuFeaturesReenableSmrr ();
491 MtrrSetAllMtrrs(&Mtrrs);
492
493 //
494 // Wait for all APs to complete MTRR programming
495 //
496 WaitForAllAPs (ApCount);
497 }
498
499 //
500 // Stop source level debug in BSP handler, the code below will not be
501 // debugged.
502 //
503 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
504
505 //
506 // Signal APs to Reset states/semaphore for this processor
507 //
508 ReleaseAllAPs ();
509
510 //
511 // Perform pending operations for hot-plug
512 //
513 SmmCpuUpdate ();
514
515 //
516 // Clear the Present flag of BSP
517 //
518 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
519
520 //
521 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
522 // WaitForAllAPs() does not depend on the Present flag.
523 //
524 WaitForAllAPs (ApCount);
525
526 //
527 // Reset BspIndex to -1, meaning BSP has not been elected.
528 //
529 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
530 mSmmMpSyncData->BspIndex = (UINT32)-1;
531 }
532
533 //
534 // Allow APs to check in from this point on
535 //
536 *mSmmMpSyncData->Counter = 0;
537 *mSmmMpSyncData->AllCpusInSync = FALSE;
538 }
539
540 /**
541 SMI handler for AP.
542
543 @param CpuIndex AP processor Index.
544 @param ValidSmi Indicates whether the current SMI is a valid SMI.
545 @param SyncMode SMM MP sync mode.
546
547 **/
548 VOID
549 APHandler (
550 IN UINTN CpuIndex,
551 IN BOOLEAN ValidSmi,
552 IN SMM_CPU_SYNC_MODE SyncMode
553 )
554 {
555 UINT64 Timer;
556 UINTN BspIndex;
557 MTRR_SETTINGS Mtrrs;
558
559 //
560 // Wait for the BSP to enter SMM, with a timeout
561 //
562 for (Timer = StartSyncTimer ();
563 !IsSyncTimerTimeout (Timer) &&
564 !(*mSmmMpSyncData->InsideSmm);
565 ) {
566 CpuPause ();
567 }
568
569 if (!(*mSmmMpSyncData->InsideSmm)) {
570 //
571 // BSP timeout in the first round
572 //
573 if (mSmmMpSyncData->BspIndex != -1) {
574 //
575 // BSP Index is known
576 //
577 BspIndex = mSmmMpSyncData->BspIndex;
578 ASSERT (CpuIndex != BspIndex);
579
580 //
581 // Send SMI IPI to bring BSP in
582 //
583 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
584
585 //
586 // Now wait for the BSP again, with a 2nd timeout
587 //
588 for (Timer = StartSyncTimer ();
589 !IsSyncTimerTimeout (Timer) &&
590 !(*mSmmMpSyncData->InsideSmm);
591 ) {
592 CpuPause ();
593 }
594
595 if (!(*mSmmMpSyncData->InsideSmm)) {
596 //
597 // Give up since BSP is unable to enter SMM
598 // and signal the completion of this AP
599 WaitForSemaphore (mSmmMpSyncData->Counter);
600 return;
601 }
602 } else {
603 //
604 // Don't know BSP index. Give up without sending IPI to BSP.
605 //
606 WaitForSemaphore (mSmmMpSyncData->Counter);
607 return;
608 }
609 }
610
611 //
612 // BSP is available
613 //
614 BspIndex = mSmmMpSyncData->BspIndex;
615 ASSERT (CpuIndex != BspIndex);
616
617 //
618 // Mark this processor's presence
619 //
620 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
621
622 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
623 //
624 // Notify BSP of arrival at this point
625 //
626 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
627 }
628
629 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
630 //
631 // Wait for the signal from BSP to backup MTRRs
632 //
633 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
634
635 //
636 // Backup OS MTRRs
637 //
638 MtrrGetAllMtrrs(&Mtrrs);
639
640 //
641 // Signal BSP the completion of this AP
642 //
643 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
644
645 //
646 // Wait for BSP's signal to program MTRRs
647 //
648 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
649
650 //
651 // Replace OS MTRRs with SMI MTRRs
652 //
653 ReplaceOSMtrrs (CpuIndex);
654
655 //
656 // Signal BSP the completion of this AP
657 //
658 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
659 }
660
661 while (TRUE) {
662 //
663 // Wait for something to happen
664 //
665 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
666
667 //
668 // Check if BSP wants to exit SMM
669 //
670 if (!(*mSmmMpSyncData->InsideSmm)) {
671 break;
672 }
673
674 //
675 // BUSY should be acquired by SmmStartupThisAp()
676 //
677 ASSERT (
678 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
679 );
680
681 //
682 // Invoke the scheduled procedure
683 //
684 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
685 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
686 );
687
688 //
689 // Release BUSY
690 //
691 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
692 }
693
694 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
695 //
696 // Notify BSP the readiness of this AP to program MTRRs
697 //
698 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
699
700 //
701 // Wait for the signal from BSP to program MTRRs
702 //
703 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
704
705 //
706 // Restore OS MTRRs
707 //
708 SmmCpuFeaturesReenableSmrr ();
709 MtrrSetAllMtrrs(&Mtrrs);
710 }
711
712 //
713 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
714 //
715 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
716
717 //
718 // Wait for the signal from BSP to Reset states/semaphore for this processor
719 //
720 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
721
722 //
723 // Reset states/semaphore for this processor
724 //
725 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
726
727 //
728 // Notify BSP the readiness of this AP to exit SMM
729 //
730 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
731
732 }
733
734 /**
735 Create 4G PageTable in SMRAM.
736
737 @param ExtraPages Number of additional pages to allocate in front of the 4G page table
738 @param Is32BitPageTable Whether the page table is 32-bit PAE
739 @return PageTable Address
740
741 **/
742 UINT32
743 Gen4GPageTable (
744 IN UINTN ExtraPages,
745 IN BOOLEAN Is32BitPageTable
746 )
747 {
748 VOID *PageTable;
749 UINTN Index;
750 UINT64 *Pte;
751 UINTN PagesNeeded;
752 UINTN Low2MBoundary;
753 UINTN High2MBoundary;
754 UINTN Pages;
755 UINTN GuardPage;
756 UINT64 *Pdpte;
757 UINTN PageIndex;
758 UINTN PageAddress;
759
760 Low2MBoundary = 0;
761 High2MBoundary = 0;
762 PagesNeeded = 0;
763 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
764 //
765 // Add one more page for known good stack, then find the lower 2MB aligned address.
766 //
767 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
768 //
769 // Add two more pages for known good stack and stack guard page,
770 // then find the lower 2MB aligned address.
771 //
772 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
773 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
774 }
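//
// One 4KB page table page is needed for every 2MB region between Low2MBoundary and
// High2MBoundary, so that the 2MB mappings covering the SMM stack area can be split
// into 4KB entries and each stack's guard page can be marked not-present below.
//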
775 //
776 // Allocate the page table
777 //
778 PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
779 ASSERT (PageTable != NULL);
780
781 PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
782 Pte = (UINT64*)PageTable;
783
784 //
785 // Zero out all page table entries first
786 //
787 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
788
789 //
790 // Set Page Directory Pointers
791 //
792 for (Index = 0; Index < 4; Index++) {
793 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
794 }
795 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
796
797 //
798 // Fill in Page Directory Entries
799 //
800 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
801 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
802 }
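
//
// At this point the layout is: page 0 holds the four PDPTEs, pages 1-4 hold 512 PDEs
// each, and every PDE maps a 2MB page (IA32_PG_PS), giving an identity mapping of the
// full 0-4GB range.
//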
803
804 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
805 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
806 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
807 Pdpte = (UINT64*)PageTable;
808 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
809 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
810 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
811 //
812 // Fill in Page Table Entries
813 //
814 Pte = (UINT64*)Pages;
815 PageAddress = PageIndex;
816 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
817 if (PageAddress == GuardPage) {
818 //
819 // Mark the guard page as non-present
820 //
821 Pte[Index] = PageAddress;
822 GuardPage += mSmmStackSize;
823 if (GuardPage > mSmmStackArrayEnd) {
824 GuardPage = 0;
825 }
826 } else {
827 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
828 }
829 PageAddress+= EFI_PAGE_SIZE;
830 }
831 Pages += EFI_PAGE_SIZE;
832 }
833 }
834
835 return (UINT32)(UINTN)PageTable;
836 }
837
838 /**
839 Set memory cache ability.
840
841 @param PageTable PageTable Address
842 @param Address Memory Address to change cache ability
843 @param Cacheability Cache ability to set
844
845 **/
846 VOID
847 SetCacheability (
848 IN UINT64 *PageTable,
849 IN UINTN Address,
850 IN UINT8 Cacheability
851 )
852 {
853 UINTN PTIndex;
854 VOID *NewPageTableAddress;
855 UINT64 *NewPageTable;
856 UINTN Index;
857
858 ASSERT ((Address & EFI_PAGE_MASK) == 0);
859
860 if (sizeof (UINTN) == sizeof (UINT64)) {
861 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
862 ASSERT (PageTable[PTIndex] & IA32_PG_P);
863 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
864 }
865
866 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
867 ASSERT (PageTable[PTIndex] & IA32_PG_P);
868 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
869
870 //
871 // A perfect implementation should check the original cacheability with the
872 // one being set, and break a 2M page entry into pieces only when they
873 // disagreed.
874 //
875 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
876 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
877 //
878 // Allocate a page from SMRAM
879 //
880 NewPageTableAddress = AllocatePageTableMemory (1);
881 ASSERT (NewPageTableAddress != NULL);
882
883 NewPageTable = (UINT64 *)NewPageTableAddress;
884
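//
// Split the existing 2MB mapping into 512 4KB entries that keep its attributes.
// The PAT bit is bit 12 in a 2MB PDE but bit 7 in a 4KB PTE, hence the
// IA32_PG_PAT_2M -> IA32_PG_PAT_4K translation below.
//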
885 for (Index = 0; Index < 0x200; Index++) {
886 NewPageTable[Index] = PageTable[PTIndex];
887 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
888 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
889 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
890 }
891 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
892 }
893
894 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
895 }
896
897 ASSERT (PageTable[PTIndex] & IA32_PG_P);
898 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
899
900 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
901 ASSERT (PageTable[PTIndex] & IA32_PG_P);
902 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
903 PageTable[PTIndex] |= (UINT64)Cacheability;
904 }
905
906
907 /**
908 Schedule a procedure to run on the specified CPU.
909
910 @param Procedure The address of the procedure to run
911 @param CpuIndex Target CPU Index
912 @param ProcArguments The parameter to pass to the procedure
913
914 @retval EFI_INVALID_PARAMETER CpuNumber not valid
915 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
916 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
917 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
918 @retval EFI_SUCCESS The procedure has been successfully scheduled
919
920 **/
921 EFI_STATUS
922 EFIAPI
923 SmmStartupThisAp (
924 IN EFI_AP_PROCEDURE Procedure,
925 IN UINTN CpuIndex,
926 IN OUT VOID *ProcArguments OPTIONAL
927 )
928 {
929 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
930 CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
931 !(*(mSmmMpSyncData->CpuData[CpuIndex].Present)) ||
932 gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
933 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
934 return EFI_INVALID_PARAMETER;
935 }
936
937 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
938 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
939 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
940
941 if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
942 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
943 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
944 }
945 return EFI_SUCCESS;
946 }
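
//
// Illustrative usage sketch (hypothetical caller, not part of this driver): an SMI
// handler on the BSP could run a routine on every other CPU that checked into this
// SMI, for example:
//
//   // hypothetical EFI_AP_PROCEDURE
//   VOID EFIAPI MyApProcedure (IN OUT VOID *Buffer);
//
//   for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
//     if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
//       SmmStartupThisAp (MyApProcedure, Index, NULL);  // fails for absent/busy APs
//     }
//   }
//
// When PcdCpuSmmBlockStartupThisAp is set, each call returns only after the AP has
// finished; otherwise completion is only guaranteed once the AP's Busy lock is free.
//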
947
948 /**
949 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
950 This is useful when you want hardware breakpoints set outside of SMM to remain in effect while SMM code runs.
951
952 NOTE: This might not be appropriate at runtime since it might
953 conflict with OS debugging facilities. Turn it off in RELEASE builds.
954
955 @param CpuIndex CPU Index
956
957 **/
958 VOID
959 EFIAPI
960 CpuSmmDebugEntry (
961 IN UINTN CpuIndex
962 )
963 {
964 SMRAM_SAVE_STATE_MAP *CpuSaveState;
965
966 if (FeaturePcdGet (PcdCpuSmmDebug)) {
967 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
968 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
969 AsmWriteDr6 (CpuSaveState->x86._DR6);
970 AsmWriteDr7 (CpuSaveState->x86._DR7);
971 } else {
972 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
973 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
974 }
975 }
976 }
977
978 /**
979 This function restores DR6 & DR7 to SMM save state.
980
981 NOTE: This might not be appropriate at runtime since it might
982 conflict with OS debugging facilities. Turn it off in RELEASE builds.
983
984 @param CpuIndex CPU Index
985
986 **/
987 VOID
988 EFIAPI
989 CpuSmmDebugExit (
990 IN UINTN CpuIndex
991 )
992 {
993 SMRAM_SAVE_STATE_MAP *CpuSaveState;
994
995 if (FeaturePcdGet (PcdCpuSmmDebug)) {
996 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
997 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
998 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
999 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1000 } else {
1001 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1002 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1003 }
1004 }
1005 }
1006
1007 /**
1008 C function for SMI entry, each processor comes here upon SMI trigger.
1009
1010 @param CpuIndex CPU Index
1011
1012 **/
1013 VOID
1014 EFIAPI
1015 SmiRendezvous (
1016 IN UINTN CpuIndex
1017 )
1018 {
1019 EFI_STATUS Status;
1020 BOOLEAN ValidSmi;
1021 BOOLEAN IsBsp;
1022 BOOLEAN BspInProgress;
1023 UINTN Index;
1024 UINTN Cr2;
1025 BOOLEAN XdDisableFlag;
1026 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;
1027
1028 //
1029 // Save Cr2 because Page Fault exception in SMM may override its value
1030 //
1031 Cr2 = AsmReadCr2 ();
1032
1033 //
1034 // Perform CPU specific entry hooks
1035 //
1036 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1037
1038 //
1039 // Determine if this is a valid SMI
1040 //
1041 ValidSmi = PlatformValidSmi();
1042
1043 //
1044 // Determine if the BSP is already in progress. Note this must be checked after
1045 // ValidSmi because BSP may clear a valid SMI source after checking in.
1046 //
1047 BspInProgress = *mSmmMpSyncData->InsideSmm;
1048
1049 if (!BspInProgress && !ValidSmi) {
1050 //
1051 // If we reach here, it means that when we sampled the ValidSmi flag, either the SMI status had not
1052 // been cleared by the BSP in a new SMI run (so we have a truly invalid SMI), or the SMI
1053 // status had been cleared by the BSP and an existing SMI run has almost ended. (Note
1054 // we sampled the ValidSmi flag BEFORE judging the BSP-in-progress status.) In both cases, there
1055 // is nothing we need to do.
1056 //
1057 goto Exit;
1058 } else {
1059 //
1060 // Signal presence of this processor
1061 //
1062 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1063 //
1064 // BSP has already ended the synchronization, so QUIT!!!
1065 //
1066
1067 //
1068 // Wait for BSP's signal to finish SMI
1069 //
1070 while (*mSmmMpSyncData->AllCpusInSync) {
1071 CpuPause ();
1072 }
1073 goto Exit;
1074 } else {
1075
1076 //
1077 // The BUSY lock is initialized to Released state.
1078 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1079 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1080 // after AP's present flag is detected.
1081 //
1082 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1083 }
1084
1085 //
1086 // Try to enable XD
1087 //
1088 XdDisableFlag = FALSE;
1089 if (mXdSupported) {
1090 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1091 if (MiscEnableMsr.Bits.XD == 1) {
1092 XdDisableFlag = TRUE;
1093 MiscEnableMsr.Bits.XD = 0;
1094 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
1095 }
1096 ActivateXd ();
1097 }
1098
1099 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1100 ActivateSmmProfile (CpuIndex);
1101 }
1102
1103 if (BspInProgress) {
1104 //
1105 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1106 // as BSP may have cleared the SMI status
1107 //
1108 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1109 } else {
1110 //
1111 // We have a valid SMI
1112 //
1113
1114 //
1115 // Elect BSP
1116 //
1117 IsBsp = FALSE;
1118 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1119 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1120 //
1121 // Call platform hook to do BSP election
1122 //
1123 Status = PlatformSmmBspElection (&IsBsp);
1124 if (EFI_SUCCESS == Status) {
1125 //
1126 // Platform hook determines successfully
1127 //
1128 if (IsBsp) {
1129 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1130 }
1131 } else {
1132 //
1133 // Platform hook fails to determine, use default BSP election method
1134 //
1135 InterlockedCompareExchange32 (
1136 (UINT32*)&mSmmMpSyncData->BspIndex,
1137 (UINT32)-1,
1138 (UINT32)CpuIndex
1139 );
1140 }
1141 }
1142 }
1143
1144 //
1145 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1146 //
1147 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1148
1149 //
1150 // Clear last request for SwitchBsp.
1151 //
1152 if (mSmmMpSyncData->SwitchBsp) {
1153 mSmmMpSyncData->SwitchBsp = FALSE;
1154 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1155 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1156 }
1157 }
1158
1159 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1160 SmmProfileRecordSmiNum ();
1161 }
1162
1163 //
1164 // BSP Handler is always called with a ValidSmi == TRUE
1165 //
1166 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1167 } else {
1168 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1169 }
1170 }
1171
1172 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1173
1174 //
1175 // Wait for BSP's signal to exit SMI
1176 //
1177 while (*mSmmMpSyncData->AllCpusInSync) {
1178 CpuPause ();
1179 }
1180
1181 //
1182 // Restore XD
1183 //
1184 if (XdDisableFlag) {
1185 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1186 MiscEnableMsr.Bits.XD = 1;
1187 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
1188 }
1189 }
1190
1191 Exit:
1192 SmmCpuFeaturesRendezvousExit (CpuIndex);
1193 //
1194 // Restore Cr2
1195 //
1196 AsmWriteCr2 (Cr2);
1197 }
1198
1199 /**
1200 Allocate buffer for all semaphores and spin locks.
1201
1202 **/
1203 VOID
1204 InitializeSmmCpuSemaphores (
1205 VOID
1206 )
1207 {
1208 UINTN ProcessorCount;
1209 UINTN TotalSize;
1210 UINTN GlobalSemaphoresSize;
1211 UINTN CpuSemaphoresSize;
1212 UINTN MsrSemaphoreSize;
1213 UINTN SemaphoreSize;
1214 UINTN Pages;
1215 UINTN *SemaphoreBlock;
1216 UINTN SemaphoreAddr;
1217
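//
// Each semaphore/lock below is placed in its own SemaphoreSize-sized slot;
// GetSpinLockProperties() reports the platform's recommended spin lock size
// (typically a whole cache line), so distinct semaphores should not share a line.
//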
1218 SemaphoreSize = GetSpinLockProperties ();
1219 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1220 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1221 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1222 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
1223 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
1224 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1225 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1226 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1227 SemaphoreBlock = AllocatePages (Pages);
1228 ASSERT (SemaphoreBlock != NULL);
1229 ZeroMem (SemaphoreBlock, TotalSize);
1230
1231 SemaphoreAddr = (UINTN)SemaphoreBlock;
1232 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1233 SemaphoreAddr += SemaphoreSize;
1234 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1235 SemaphoreAddr += SemaphoreSize;
1236 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1237 SemaphoreAddr += SemaphoreSize;
1238 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1239 SemaphoreAddr += SemaphoreSize;
1240 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1241 = (SPIN_LOCK *)SemaphoreAddr;
1242 SemaphoreAddr += SemaphoreSize;
1243 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
1244 = (SPIN_LOCK *)SemaphoreAddr;
1245
1246 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1247 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1248 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1249 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1250 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1251 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1252
1253 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
1254 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
1255 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
1256 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
1257 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
1258
1259 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1260 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1261 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
1262
1263 mSemaphoreSize = SemaphoreSize;
1264 }
1265
1266 /**
1267 Initialize uncacheable data.
1268
1269 **/
1270 VOID
1271 EFIAPI
1272 InitializeMpSyncData (
1273 VOID
1274 )
1275 {
1276 UINTN CpuIndex;
1277
1278 if (mSmmMpSyncData != NULL) {
1279 //
1280 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1281 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1282 //
1283 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1284 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1285 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1286 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1287 //
1288 // Enable BSP election by setting BspIndex to -1
1289 //
1290 mSmmMpSyncData->BspIndex = (UINT32)-1;
1291 }
1292 mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
1293
1294 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1295 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1296 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1297 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1298 mSmmMpSyncData->AllCpusInSync != NULL);
1299 *mSmmMpSyncData->Counter = 0;
1300 *mSmmMpSyncData->InsideSmm = FALSE;
1301 *mSmmMpSyncData->AllCpusInSync = FALSE;
1302
1303 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1304 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1305 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1306 mSmmMpSyncData->CpuData[CpuIndex].Run =
1307 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1308 mSmmMpSyncData->CpuData[CpuIndex].Present =
1309 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1310 }
1311 }
1312 }
1313
1314 /**
1315 Initialize global data for MP synchronization.
1316
1317 @param Stacks Base address of SMI stack buffer for all processors.
1318 @param StackSize Stack size for each processor in SMM.
1319
1320 **/
1321 UINT32
1322 InitializeMpServiceData (
1323 IN VOID *Stacks,
1324 IN UINTN StackSize
1325 )
1326 {
1327 UINT32 Cr3;
1328 UINTN Index;
1329 MTRR_SETTINGS *Mtrr;
1330 PROCESSOR_SMM_DESCRIPTOR *Psd;
1331 UINT8 *GdtTssTables;
1332 UINTN GdtTableStepSize;
1333
1334 //
1335 // Allocate memory for all locks and semaphores
1336 //
1337 InitializeSmmCpuSemaphores ();
1338
1339 //
1340 // Initialize mSmmMpSyncData
1341 //
1342 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1343 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1344 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1345 ASSERT (mSmmMpSyncData != NULL);
1346 InitializeMpSyncData ();
1347
1348 //
1349 // Initialize physical address mask
1350 // NOTE: Physical memory above virtual address limit is not supported !!!
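// CPUID leaf 80000008h, EAX[7:0] reports the physical address width; the computed
// mask is then clipped to 48 bits and its low 12 bits cleared so it can be applied
// directly to page table entries (see SetCacheability()).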
1351 //
1352 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1353 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1354 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1355
1356 //
1357 // Create page tables
1358 //
1359 Cr3 = SmmInitPageTable ();
1360
1361 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1362
1363 //
1364 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
1365 //
1366 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1367 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
1368 CopyMem (Psd, &gcPsd, sizeof (gcPsd));
1369 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
1370 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
1371
1372 //
1373 // Install SMI handler
1374 //
1375 InstallSmiHandler (
1376 Index,
1377 (UINT32)mCpuHotPlugData.SmBase[Index],
1378 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1379 StackSize,
1380 (UINTN)Psd->SmmGdtPtr,
1381 Psd->SmmGdtSize,
1382 gcSmiIdtr.Base,
1383 gcSmiIdtr.Limit + 1,
1384 Cr3
1385 );
1386 }
1387
1388 //
1389 // Record current MTRR settings
1390 //
1391 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
1392 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
1393 MtrrGetAllMtrrs (Mtrr);
1394
1395 return Cr3;
1396 }
1397
1398 /**
1399
1400 Register the SMM Foundation entry point.
1401
1402 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1403 @param SmmEntryPoint SMM Foundation EntryPoint
1404
1405 @retval EFI_SUCCESS The SMM Foundation entry point was successfully registered
1406
1407 **/
1408 EFI_STATUS
1409 EFIAPI
1410 RegisterSmmEntry (
1411 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1412 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1413 )
1414 {
1415 //
1416 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1417 //
1418 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1419 return EFI_SUCCESS;
1420 }