/** @file
SMM MP service implementation

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
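
//
// Illustrative sketch (not part of the driver): how the primitives above form
// the BSP/AP token handshake used throughout this file. ApIndex and BspIndex
// are placeholder names. The BSP posts one token per AP on that AP's Run
// semaphore; the AP blocks until a token arrives, consumes it, and
// acknowledges on the BSP's Run semaphore:
//
//   // BSP side: wake one AP, then wait for its acknowledgement.
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[ApIndex].Run);
//   WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
//
//   // AP side: block until the BSP posts a token, then acknowledge.
//   WaitForSemaphore (mSmmMpSyncData->CpuData[ApIndex].Run);
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
//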

/**
  Waits for all APs to perform an atomic compare exchange operation to release
  the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE   if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}
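
//
// Note (assumption, not stated in this file): the sync timeout polled via
// StartSyncTimer()/IsSyncTimerTimeout() above is typically governed by
// PcdCpuSmmApSyncTimeout (in microseconds) in UefiCpuPkg. A platform could
// tune it in its DSC file, for example:
//
//   [PcdsFixedAtBuild]
//     gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|1000000
//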

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
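
//
// Summary sketch derived from BSPHandler() above and APHandler() below: the
// traditional-mode rendezvous when MTRR configuration is needed. Each line
// pair is one Run-semaphore token exchange.
//
//   BSP                                     each AP
//   ---                                     -------
//   SmmWaitForApArrival ()                  ReleaseSemaphore (BSP Run)  // arrive
//   LockdownSemaphore (Counter)
//   WaitForAllAPs (ApCount)
//   ReleaseAllAPs ()                        WaitForSemaphore (AP Run)
//   MtrrGetAllMtrrs (&Mtrrs)                MtrrGetAllMtrrs (&Mtrrs)    // backup
//   WaitForAllAPs (ApCount)                 ReleaseSemaphore (BSP Run)
//   ReleaseAllAPs ()                        WaitForSemaphore (AP Run)
//   ReplaceOSMtrrs (CpuIndex)               ReplaceOSMtrrs (CpuIndex)   // SMI MTRRs
//   WaitForAllAPs (ApCount)                 ReleaseSemaphore (BSP Run)
//   ...run SMI handlers...                  ...service Run requests...
//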

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid one.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID                              *PageTable;
  UINTN                             Index;
  UINT64                            *Pte;
  UINTN                             PagesNeeded;
  UINTN                             Low2MBoundary;
  UINTN                             High2MBoundary;
  UINTN                             Pages;
  UINTN                             GuardPage;
  UINT64                            *Pdpte;
  UINTN                             PageIndex;
  UINTN                             PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}
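
//
// Worked layout, derived from the code above: without the stack guard, the 5
// allocated pages hold the whole identity map of the low 4GB:
//
//   page 0    : PDPT - 4 PDPTEs used (one per 1GB), rest of the page zeroed
//   pages 1-4 : 4 page directories x 512 entries x 2MB pages = 4GB
//
// A 32-bit address decodes as bits [31:30] -> PDPTE index (the
// BitFieldRead32 (..., 30, 31) above), bits [29:21] -> PDE index, and bits
// [20:0] -> offset within the 2MB page. When PcdCpuSmmStackGuard splits a
// 2MB region into 4KB pages, bits [20:12] select the PTE instead.
//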

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG ((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}
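
//
// Illustrative usage sketch (MyApRoutine is a hypothetical name, not part of
// this file): scheduling a routine on AP 1 from code running on the BSP
// inside SMM.
//
//   VOID
//   EFIAPI
//   MyApRoutine (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // Runs on the target AP inside SMM; keep it short and self-contained.
//   }
//
//   Status = SmmBlockingStartupThisAp (MyApRoutine, 1, NULL);
//   ASSERT_EFI_ERROR (Status);
//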

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  This is useful for enabling hardware breakpoints in SMM without having to enter SMM
  first to program the debug registers.

  NOTE: It might not be appropriate to use this at runtime since it might
  conflict with OS debugging facilities. Turn them off in RELEASE builds.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate to use this at runtime since it might
  conflict with OS debugging facilities. Turn them off in RELEASE builds.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                        Status;
  BOOLEAN                           ValidSmi;
  BOOLEAN                           IsBsp;
  BOOLEAN                           BspInProgress;
  UINTN                             Index;
  UINTN                             Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (Status == EFI_SUCCESS) {
            //
            // Platform hook succeeded in determining the BSP
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine; use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                             ProcessorCount;
  UINTN                             TotalSize;
  UINTN                             GlobalSemaphoresSize;
  UINTN                             CpuSemaphoresSize;
  UINTN                             MsrSemaphoreSize;
  UINTN                             SemaphoreSize;
  UINTN                             Pages;
  UINTN                             *SemaphoreBlock;
  UINTN                             SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
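
//
// Worked example for the carving above, assuming GetSpinLockProperties()
// returns one 64-byte cache line and a 4-CPU system:
//
//   global block : 6 pointer fields assigned above  -> 6 * 64       =  384 bytes
//   per-CPU block: 3 fields (Busy/Run/Present) * 4  -> 12 * 64      =  768 bytes
//   MSR block    : MSR_SPIN_LOCK_INIT_NUM * 64 bytes
//
// Each semaphore sits in its own cache line so that CPUs spinning on
// different semaphores do not bounce the same line between caches.
//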

/**
  Initialize uncacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                             CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

  @return The CR3 value (page table base address) to be used in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported!!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
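
//
// Worked example for the gPhyMask computation above, assuming CPUID leaf
// 0x80000008 reports a 36-bit physical address width (a common value):
//
//   LShiftU64 (1, 36) - 1            = 0x0000000FFFFFFFFF
//   (1ull << 48) - EFI_PAGE_SIZE     = 0x0000FFFFFFFFF000
//   gPhyMask = AND of the two        = 0x0000000FFFFFF000
//
// i.e. gPhyMask keeps the page-frame bits of a physical address: the low 12
// bits are cleared and the width is capped at 48 bits.
//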

/**
  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}