UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
19 //
20 MTRR_SETTINGS gSmiMtrrs;
21 UINT64 gPhyMask;
22 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
23 UINTN mSmmMpSyncDataSize;
24 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
25 UINTN mSemaphoreSize;
26 SPIN_LOCK *mPFLock = NULL;
27 SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
28
29 /**
30 Performs an atomic compare exchange operation to acquire the semaphore.
31 The compare exchange operation must be performed using
32 MP safe mechanisms.
33
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
37
38 **/
39 UINT32
40 WaitForSemaphore (
41 IN OUT volatile UINT32 *Sem
42 )
43 {
44 UINT32 Value;
45
46 do {
47 Value = *Sem;
48 } while (Value == 0 ||
49 InterlockedCompareExchange32 (
50 (UINT32*)Sem,
51 Value,
52 Value - 1
53 ) != Value);
54 return Value - 1;
55 }
56
57
58 /**
59 Performs an atomic compare exchange operation to release the semaphore.
60 The compare exchange operation must be performed using
61 MP safe mechanisms.
62
63 @param Sem IN: 32-bit unsigned integer
64 OUT: original integer + 1
65 @return Original integer + 1
66
67 **/
68 UINT32
69 ReleaseSemaphore (
70 IN OUT volatile UINT32 *Sem
71 )
72 {
73 UINT32 Value;
74
75 do {
76 Value = *Sem;
77 } while (Value + 1 != 0 &&
78 InterlockedCompareExchange32 (
79 (UINT32*)Sem,
80 Value,
81 Value + 1
82 ) != Value);
83 return Value + 1;
84 }
85
86 /**
87 Performs an atomic compare exchange operation to lock the semaphore.
88 The compare exchange operation must be performed using
89 MP safe mechanisms.
90
91 @param Sem IN: 32-bit unsigned integer
92 OUT: -1
93 @return Original integer
94
95 **/
96 UINT32
97 LockdownSemaphore (
98 IN OUT volatile UINT32 *Sem
99 )
100 {
101 UINT32 Value;
102
103 do {
104 Value = *Sem;
105 } while (InterlockedCompareExchange32 (
106 (UINT32*)Sem,
107 Value, (UINT32)-1
108 ) != Value);
109 return Value;
110 }
111
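//
// For illustration, a condensed view of how the three primitives above
// cooperate during one SMI (summarized from SmiRendezvous(), BSPHandler()
// and APHandler() below):
//
//   ReleaseSemaphore (mSmmMpSyncData->Counter);                 // each CPU entering SMM counts itself in
//   ...
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;  // BSP freezes the counter so late arrivals
//                                                               // are excluded, and derives the AP count
//   WaitForAllAPs (ApCount);                                    // BSP blocks until every counted AP has
//                                                               // released the BSP's Run semaphore once
//   WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);   // each AP blocks on its own Run semaphore
//                                                               // until the BSP releases it
//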
112 /**
113 Waits for all APs to perform an atomic compare exchange operation to release the semaphore.
114
115 @param NumberOfAPs Number of APs to wait for.
116
117 **/
118 VOID
119 WaitForAllAPs (
120 IN UINTN NumberOfAPs
121 )
122 {
123 UINTN BspIndex;
124
125 BspIndex = mSmmMpSyncData->BspIndex;
126 while (NumberOfAPs-- > 0) {
127 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
128 }
129 }
130
131 /**
132 Performs an atomic compare exchange operation to release the semaphore
133 for each AP.
134
135 **/
136 VOID
137 ReleaseAllAPs (
138 VOID
139 )
140 {
141 UINTN Index;
142 UINTN BspIndex;
143
144 BspIndex = mSmmMpSyncData->BspIndex;
145 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
146 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
147 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
148 }
149 }
150 }
151
152 /**
153 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
154
155 @param Exceptions CPU Arrival exception flags.
156
157 @retval TRUE if all CPUs have checked in.
158 @retval FALSE if at least one Normal AP hasn't checked in.
159
160 **/
161 BOOLEAN
162 AllCpusInSmmWithExceptions (
163 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
164 )
165 {
166 UINTN Index;
167 SMM_CPU_DATA_BLOCK *CpuData;
168 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
169
170 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
171
172 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
173 return TRUE;
174 }
175
176 CpuData = mSmmMpSyncData->CpuData;
177 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
178 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
179 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
180 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
181 continue;
182 }
183 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
184 continue;
185 }
186 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
187 continue;
188 }
189 return FALSE;
190 }
191 }
192
193
194 return TRUE;
195 }
196
197
198 /**
199 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
200 entering SMM, except SMI-disabled APs.
201
202 **/
203 VOID
204 SmmWaitForApArrival (
205 VOID
206 )
207 {
208 UINT64 Timer;
209 UINTN Index;
210
211 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
212
213 //
214 // The platform implementer should choose a timeout value appropriately:
215 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
216 // that SMI Handlers must ALWAYS take into account the case that not all APs are available in an SMI run.
217 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
218 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
219 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
220 // SMI IPI), because with a buffered SMI, the CPU will enter SMM immediately after it is brought out of the blocked state.
221 // - The timeout value must be longer than the longest possible IO operation in the system.
222 //
223
224 //
225 // Sync with APs 1st timeout
226 //
227 for (Timer = StartSyncTimer ();
228 !IsSyncTimerTimeout (Timer) &&
229 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
230 ) {
231 CpuPause ();
232 }
233
234 //
235 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
236 // because:
237 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
238 // normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the delayed / blocked state, they
239 // enter SMM immediately without executing instructions in normal mode. Note the traditional flow requires that no APs are doing normal mode
240 // work while SMI handling is on-going.
241 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
242 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
243 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
244 // mode work while SMI handling is on-going.
245 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
246 // - In traditional flow, SMI disabling is discouraged.
247 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
248 // In both cases, adding SMI-disabling checking code increases overhead.
249 //
250 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
251 //
252 // Send SMI IPIs to bring outside processors in
253 //
254 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
255 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
256 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
257 }
258 }
259
260 //
261 // Sync with APs 2nd timeout.
262 //
263 for (Timer = StartSyncTimer ();
264 !IsSyncTimerTimeout (Timer) &&
265 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
266 ) {
267 CpuPause ();
268 }
269 }
270
271 return;
272 }
273
274
275 /**
276 Replace OS MTRRs with SMI MTRRs.
277
278 @param CpuIndex Processor Index
279
280 **/
281 VOID
282 ReplaceOSMtrrs (
283 IN UINTN CpuIndex
284 )
285 {
286 SmmCpuFeaturesDisableSmrr ();
287
288 //
289 // Replace all MTRR registers
290 //
291 MtrrSetAllMtrrs (&gSmiMtrrs);
292 }
293
294 /**
295 SMI handler for BSP.
296
297 @param CpuIndex BSP processor Index
298 @param SyncMode SMM MP sync mode
299
300 **/
301 VOID
302 BSPHandler (
303 IN UINTN CpuIndex,
304 IN SMM_CPU_SYNC_MODE SyncMode
305 )
306 {
307 UINTN Index;
308 MTRR_SETTINGS Mtrrs;
309 UINTN ApCount;
310 BOOLEAN ClearTopLevelSmiResult;
311 UINTN PresentCount;
312
313 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
314 ApCount = 0;
315
316 //
317 // Flag BSP's presence
318 //
319 *mSmmMpSyncData->InsideSmm = TRUE;
320
321 //
322 // Initialize Debug Agent to start source level debug in BSP handler
323 //
324 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
325
326 //
327 // Mark this processor's presence
328 //
329 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
330
331 //
332 // Clear platform top level SMI status bit before calling SMI handlers. If
333 // we cleared it after SMI handlers are run, we would miss the SMI that
334 // occurs after SMI handlers are done and before SMI status bit is cleared.
335 //
336 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
337 ASSERT (ClearTopLevelSmiResult == TRUE);
338
339 //
340 // Set running processor index
341 //
342 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
343
344 //
345 // In Traditional Sync Mode, or if MTRRs need to be configured, gather all available APs.
346 //
347 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
348
349 //
350 // Wait for APs to arrive
351 //
352 SmmWaitForApArrival();
353
354 //
355 // Lock the counter down and retrieve the number of APs
356 //
357 *mSmmMpSyncData->AllCpusInSync = TRUE;
358 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
359
360 //
361 // Wait for all APs to get ready for programming MTRRs
362 //
363 WaitForAllAPs (ApCount);
364
365 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
366 //
367 // Signal all APs that it's time to back up MTRRs
368 //
369 ReleaseAllAPs ();
370
371 //
372 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
373 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
374 // to a large enough value to avoid this situation.
375 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
376 // We do the backup first and then set MTRR to avoid race condition for threads
377 // in the same core.
378 //
379 MtrrGetAllMtrrs(&Mtrrs);
380
381 //
382 // Wait for all APs to complete their MTRR saving
383 //
384 WaitForAllAPs (ApCount);
385
386 //
387 // Let all processors program SMM MTRRs together
388 //
389 ReleaseAllAPs ();
390
391 //
392 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
393 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
394 // to a large enough value to avoid this situation.
395 //
396 ReplaceOSMtrrs (CpuIndex);
397
398 //
399 // Wait for all APs to complete their MTRR programming
400 //
401 WaitForAllAPs (ApCount);
402 }
403 }
404
405 //
406 // The BUSY lock is initialized to Acquired state
407 //
408 AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);
409
410 //
411 // Perform the pre tasks
412 //
413 PerformPreTasks ();
414
415 //
416 // Invoke SMM Foundation EntryPoint with the processor information context.
417 //
418 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
419
420 //
421 // Make sure all APs have completed their pending non-blocking tasks
422 //
423 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
424 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
425 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
426 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
427 }
428 }
429
430 //
431 // Perform the remaining tasks
432 //
433 PerformRemainingTasks ();
434
435 //
436 // In Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
437 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
438 // will run through freely.
439 //
440 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
441
442 //
443 // Lock the counter down and retrieve the number of APs
444 //
445 *mSmmMpSyncData->AllCpusInSync = TRUE;
446 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
447 //
448 // Make sure all APs have their Present flag set
449 //
450 while (TRUE) {
451 PresentCount = 0;
452 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
453 if (*(mSmmMpSyncData->CpuData[Index].Present)) {
454 PresentCount ++;
455 }
456 }
457 if (PresentCount > ApCount) {
458 break;
459 }
460 }
461 }
462
463 //
464 // Notify all APs to exit
465 //
466 *mSmmMpSyncData->InsideSmm = FALSE;
467 ReleaseAllAPs ();
468
469 //
470 // Wait for all APs to complete their pending tasks
471 //
472 WaitForAllAPs (ApCount);
473
474 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
475 //
476 // Signal APs to restore MTRRs
477 //
478 ReleaseAllAPs ();
479
480 //
481 // Restore OS MTRRs
482 //
483 SmmCpuFeaturesReenableSmrr ();
484 MtrrSetAllMtrrs(&Mtrrs);
485
486 //
487 // Wait for all APs to complete MTRR programming
488 //
489 WaitForAllAPs (ApCount);
490 }
491
492 //
493 // Stop source level debug in BSP handler; the code below will not be
494 // debugged.
495 //
496 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
497
498 //
499 // Signal APs to Reset states/semaphore for this processor
500 //
501 ReleaseAllAPs ();
502
503 //
504 // Perform pending operations for hot-plug
505 //
506 SmmCpuUpdate ();
507
508 //
509 // Clear the Present flag of BSP
510 //
511 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
512
513 //
514 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
515 // WaitForAllAPs does not depend on the Present flag.
516 //
517 WaitForAllAPs (ApCount);
518
519 //
520 // Reset BspIndex to -1, meaning BSP has not been elected.
521 //
522 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
523 mSmmMpSyncData->BspIndex = (UINT32)-1;
524 }
525
526 //
527 // Allow APs to check in from this point on
528 //
529 *mSmmMpSyncData->Counter = 0;
530 *mSmmMpSyncData->AllCpusInSync = FALSE;
531 }
532
533 /**
534 SMI handler for AP.
535
536 @param CpuIndex AP processor Index.
537 @param ValidSmi Indicates whether the current SMI is a valid SMI.
538 @param SyncMode SMM MP sync mode.
539
540 **/
541 VOID
542 APHandler (
543 IN UINTN CpuIndex,
544 IN BOOLEAN ValidSmi,
545 IN SMM_CPU_SYNC_MODE SyncMode
546 )
547 {
548 UINT64 Timer;
549 UINTN BspIndex;
550 MTRR_SETTINGS Mtrrs;
551
552 //
553 // Wait for the BSP to flag its presence (1st timeout)
554 //
555 for (Timer = StartSyncTimer ();
556 !IsSyncTimerTimeout (Timer) &&
557 !(*mSmmMpSyncData->InsideSmm);
558 ) {
559 CpuPause ();
560 }
561
562 if (!(*mSmmMpSyncData->InsideSmm)) {
563 //
564 // BSP timeout in the first round
565 //
566 if (mSmmMpSyncData->BspIndex != -1) {
567 //
568 // BSP Index is known
569 //
570 BspIndex = mSmmMpSyncData->BspIndex;
571 ASSERT (CpuIndex != BspIndex);
572
573 //
574 // Send SMI IPI to bring BSP in
575 //
576 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
577
578 //
579 // Now wait for the BSP a 2nd time, again with a timeout
580 //
581 for (Timer = StartSyncTimer ();
582 !IsSyncTimerTimeout (Timer) &&
583 !(*mSmmMpSyncData->InsideSmm);
584 ) {
585 CpuPause ();
586 }
587
588 if (!(*mSmmMpSyncData->InsideSmm)) {
589 //
590 // Give up since BSP is unable to enter SMM
591 // and signal the completion of this AP
592 WaitForSemaphore (mSmmMpSyncData->Counter);
593 return;
594 }
595 } else {
596 //
597 // Don't know BSP index. Give up without sending IPI to BSP.
598 //
599 WaitForSemaphore (mSmmMpSyncData->Counter);
600 return;
601 }
602 }
603
604 //
605 // BSP is available
606 //
607 BspIndex = mSmmMpSyncData->BspIndex;
608 ASSERT (CpuIndex != BspIndex);
609
610 //
611 // Mark this processor's presence
612 //
613 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
614
615 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
616 //
617 // Notify BSP of arrival at this point
618 //
619 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
620 }
621
622 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
623 //
624 // Wait for the signal from BSP to backup MTRRs
625 //
626 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
627
628 //
629 // Backup OS MTRRs
630 //
631 MtrrGetAllMtrrs(&Mtrrs);
632
633 //
634 // Signal BSP the completion of this AP
635 //
636 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
637
638 //
639 // Wait for BSP's signal to program MTRRs
640 //
641 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
642
643 //
644 // Replace OS MTRRs with SMI MTRRs
645 //
646 ReplaceOSMtrrs (CpuIndex);
647
648 //
649 // Signal BSP the completion of this AP
650 //
651 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
652 }
653
654 while (TRUE) {
655 //
656 // Wait for something to happen
657 //
658 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
659
660 //
661 // Check if BSP wants to exit SMM
662 //
663 if (!(*mSmmMpSyncData->InsideSmm)) {
664 break;
665 }
666
667 //
668 // BUSY should be acquired by SmmStartupThisAp()
669 //
670 ASSERT (
671 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
672 );
673
674 //
675 // Invoke the scheduled procedure
676 //
677 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
678 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
679 );
680
681 //
682 // Release BUSY
683 //
684 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
685 }
686
687 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
688 //
689 // Notify BSP the readiness of this AP to program MTRRs
690 //
691 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
692
693 //
694 // Wait for the signal from BSP to program MTRRs
695 //
696 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
697
698 //
699 // Restore OS MTRRs
700 //
701 SmmCpuFeaturesReenableSmrr ();
702 MtrrSetAllMtrrs(&Mtrrs);
703 }
704
705 //
706 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
707 //
708 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
709
710 //
711 // Wait for the signal from BSP to Reset states/semaphore for this processor
712 //
713 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
714
715 //
716 // Reset states/semaphore for this processor
717 //
718 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
719
720 //
721 // Notify BSP the readiness of this AP to exit SMM
722 //
723 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
724
725 }
726
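//
// For illustration, when SmmCpuFeaturesNeedConfigureMtrrs() is TRUE the MTRR
// synchronization in BSPHandler() and APHandler() above runs in lock-step
// (summary only, after the initial arrival sync):
//
//   BSP                                  each AP
//   ReleaseAllAPs ()             ----->  WaitForSemaphore (Run[AP])
//   MtrrGetAllMtrrs (&Mtrrs)             MtrrGetAllMtrrs (&Mtrrs)     // back up OS MTRRs
//   WaitForAllAPs (ApCount)      <-----  ReleaseSemaphore (Run[BSP])
//   ReleaseAllAPs ()             ----->  WaitForSemaphore (Run[AP])
//   ReplaceOSMtrrs (CpuIndex)            ReplaceOSMtrrs (CpuIndex)    // program SMM MTRRs
//   WaitForAllAPs (ApCount)      <-----  ReleaseSemaphore (Run[BSP])
//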
727 /**
728 Create a page table in SMRAM that maps the first 4GB of address space.
729
730 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
731 @return PageTable Address
732
733 **/
734 UINT32
735 Gen4GPageTable (
736 IN BOOLEAN Is32BitPageTable
737 )
738 {
739 VOID *PageTable;
740 UINTN Index;
741 UINT64 *Pte;
742 UINTN PagesNeeded;
743 UINTN Low2MBoundary;
744 UINTN High2MBoundary;
745 UINTN Pages;
746 UINTN GuardPage;
747 UINT64 *Pdpte;
748 UINTN PageIndex;
749 UINTN PageAddress;
750
751 Low2MBoundary = 0;
752 High2MBoundary = 0;
753 PagesNeeded = 0;
754 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
755 //
756 // Add one more page for known good stack, then find the lower 2MB aligned address.
757 //
758 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
759 //
760 // Add two more pages for known good stack and stack guard page,
761 // then find the lower 2MB aligned address.
762 //
763 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
764 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
765 }
766 //
767 // Allocate the page table
768 //
769 PageTable = AllocatePageTableMemory (5 + PagesNeeded);
770 ASSERT (PageTable != NULL);
771
772 PageTable = (VOID *)((UINTN)PageTable);
773 Pte = (UINT64*)PageTable;
774
775 //
776 // Zero out all page table entries first
777 //
778 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
779
780 //
781 // Set Page Directory Pointers
782 //
783 for (Index = 0; Index < 4; Index++) {
784 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
785 }
786 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
787
788 //
789 // Fill in Page Directory Entries
790 //
791 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
792 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
793 }
794
795 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
796 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
797 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
798 Pdpte = (UINT64*)PageTable;
799 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
800 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
801 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
802 //
803 // Fill in Page Table Entries
804 //
805 Pte = (UINT64*)Pages;
806 PageAddress = PageIndex;
807 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
808 if (PageAddress == GuardPage) {
809 //
810 // Mark the guard page as non-present
811 //
812 Pte[Index] = PageAddress;
813 GuardPage += mSmmStackSize;
814 if (GuardPage > mSmmStackArrayEnd) {
815 GuardPage = 0;
816 }
817 } else {
818 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
819 }
820 PageAddress+= EFI_PAGE_SIZE;
821 }
822 Pages += EFI_PAGE_SIZE;
823 }
824 }
825
826 return (UINT32)(UINTN)PageTable;
827 }
828
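//
// For reference, the layout produced by Gen4GPageTable() above: page 0 holds
// the 4 PDPTEs, pages 1-4 hold the page directories, and 4 directories x 512
// entries x 2MB per PDE identity-map the full 4GB address range. When
// PcdCpuSmmStackGuard is set, one additional 4KB page table is allocated for
// each 2MB region that covers the SMM stacks, so the guard page of each
// per-CPU stack can be marked not-present.
//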
829 /**
830 Set memory cacheability.
831
832 @param PageTable PageTable Address
833 @param Address Memory Address whose cacheability is to be changed
834 @param Cacheability Cacheability value to set
835
836 **/
837 VOID
838 SetCacheability (
839 IN UINT64 *PageTable,
840 IN UINTN Address,
841 IN UINT8 Cacheability
842 )
843 {
844 UINTN PTIndex;
845 VOID *NewPageTableAddress;
846 UINT64 *NewPageTable;
847 UINTN Index;
848
849 ASSERT ((Address & EFI_PAGE_MASK) == 0);
850
851 if (sizeof (UINTN) == sizeof (UINT64)) {
852 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
853 ASSERT (PageTable[PTIndex] & IA32_PG_P);
854 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
855 }
856
857 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
858 ASSERT (PageTable[PTIndex] & IA32_PG_P);
859 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
860
861 //
862 // A perfect implementation would compare the original cacheability with the
863 // one being set, and break a 2M page entry into pieces only when they
864 // disagree.
865 //
866 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
867 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
868 //
869 // Allocate a page from SMRAM
870 //
871 NewPageTableAddress = AllocatePageTableMemory (1);
872 ASSERT (NewPageTableAddress != NULL);
873
874 NewPageTable = (UINT64 *)NewPageTableAddress;
875
876 for (Index = 0; Index < 0x200; Index++) {
877 NewPageTable[Index] = PageTable[PTIndex];
878 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
879 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
880 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
881 }
882 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
883 }
884
885 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
886 }
887
888 ASSERT (PageTable[PTIndex] & IA32_PG_P);
889 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
890
891 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
892 ASSERT (PageTable[PTIndex] & IA32_PG_P);
893 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
894 PageTable[PTIndex] |= (UINT64)Cacheability;
895 }
896
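//
// Example of the index computation used above, for the (purely illustrative)
// physical address 0xFEE00000 with 4-level paging:
//   PML4 index = (0xFEE00000 >> 39) & 0x1FF = 0
//   PDPT index = (0xFEE00000 >> 30) & 0x1FF = 3
//   PD   index = (0xFEE00000 >> 21) & 0x1FF = 0x1F7
//   PT   index = (0xFEE00000 >> 12) & 0x1FF = 0
// If the PD entry is still a 2MB page (IA32_PG_PS set), it is split into a
// 512-entry 4KB page table before the PAT/CD/WT bits of the final PTE are
// rewritten.
//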
897 /**
898 Schedule a procedure to run on the specified CPU.
899
900 @param[in] Procedure The address of the procedure to run
901 @param[in] CpuIndex Target CPU Index
902 @param[in, out] ProcArguments The parameter to pass to the procedure
903 @param[in] BlockingMode Startup AP in blocking mode or not
904
905 @retval EFI_INVALID_PARAMETER CpuIndex not valid
906 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
907 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
908 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
909 @retval EFI_SUCCESS The procedure has been successfully scheduled
910
911 **/
912 EFI_STATUS
913 InternalSmmStartupThisAp (
914 IN EFI_AP_PROCEDURE Procedure,
915 IN UINTN CpuIndex,
916 IN OUT VOID *ProcArguments OPTIONAL,
917 IN BOOLEAN BlockingMode
918 )
919 {
920 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
921 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
922 return EFI_INVALID_PARAMETER;
923 }
924 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
925 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
926 return EFI_INVALID_PARAMETER;
927 }
928 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
929 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
930 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
931 }
932 return EFI_INVALID_PARAMETER;
933 }
934 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
935 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
936 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
937 }
938 return EFI_INVALID_PARAMETER;
939 }
940
941 if (BlockingMode) {
942 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
943 } else {
944 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
945 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
946 return EFI_INVALID_PARAMETER;
947 }
948 }
949
950 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
951 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
952 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
953
954 if (BlockingMode) {
955 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
956 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
957 }
958 return EFI_SUCCESS;
959 }
960
961 /**
962 Schedule a procedure to run on the specified CPU in blocking mode.
963
964 @param[in] Procedure The address of the procedure to run
965 @param[in] CpuIndex Target CPU Index
966 @param[in, out] ProcArguments The parameter to pass to the procedure
967
968 @retval EFI_INVALID_PARAMETER CpuIndex not valid
969 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
970 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
971 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
972 @retval EFI_SUCCESS The procedure has been successfully scheduled
973
974 **/
975 EFI_STATUS
976 EFIAPI
977 SmmBlockingStartupThisAp (
978 IN EFI_AP_PROCEDURE Procedure,
979 IN UINTN CpuIndex,
980 IN OUT VOID *ProcArguments OPTIONAL
981 )
982 {
983 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
984 }
985
986 /**
987 Schedule a procedure to run on the specified CPU.
988
989 @param Procedure The address of the procedure to run
990 @param CpuIndex Target CPU Index
991 @param ProcArguments The parameter to pass to the procedure
992
993 @retval EFI_INVALID_PARAMETER CpuIndex not valid
994 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
995 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
996 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
997 @retval EFI_SUCCESS The procedure has been successfully scheduled
998
999 **/
1000 EFI_STATUS
1001 EFIAPI
1002 SmmStartupThisAp (
1003 IN EFI_AP_PROCEDURE Procedure,
1004 IN UINTN CpuIndex,
1005 IN OUT VOID *ProcArguments OPTIONAL
1006 )
1007 {
1008 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
1009 }
1010
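//
// Illustrative caller sketch (hypothetical driver code, not part of this
// module): run MyApProcedure on CPU 2 in blocking mode from code executing
// inside SMM on the BSP. MyApProcedure and the CPU index are example values.
//
//   VOID
//   EFIAPI
//   MyApProcedure (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // Executed on the target AP while it is inside SMM.
//   }
//
//   Status = SmmBlockingStartupThisAp (MyApProcedure, 2, NULL);
//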
1011 /**
1012 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
1013 This is useful when you want to enable hardware breakpoints in SMM without entering SMM mode to set them.
1014
1015 NOTE: It might not be appropriate at runtime since it might
1016 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1017
1018 @param CpuIndex CPU Index
1019
1020 **/
1021 VOID
1022 EFIAPI
1023 CpuSmmDebugEntry (
1024 IN UINTN CpuIndex
1025 )
1026 {
1027 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1028
1029 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1030 ASSERT(CpuIndex < mMaxNumberOfCpus);
1031 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1032 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1033 AsmWriteDr6 (CpuSaveState->x86._DR6);
1034 AsmWriteDr7 (CpuSaveState->x86._DR7);
1035 } else {
1036 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1037 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1038 }
1039 }
1040 }
1041
1042 /**
1043 This function restores DR6 & DR7 to SMM save state.
1044
1045 NOTE: It might not be appropriate at runtime since it might
1046 conflict with OS debugging facilities. Turn it off in RELEASE builds.
1047
1048 @param CpuIndex CPU Index
1049
1050 **/
1051 VOID
1052 EFIAPI
1053 CpuSmmDebugExit (
1054 IN UINTN CpuIndex
1055 )
1056 {
1057 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1058
1059 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1060 ASSERT(CpuIndex < mMaxNumberOfCpus);
1061 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1062 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1063 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1064 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1065 } else {
1066 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1067 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1068 }
1069 }
1070 }
1071
1072 /**
1073 C function for SMI entry; each processor comes here upon SMI trigger.
1074
1075 @param CpuIndex CPU Index
1076
1077 **/
1078 VOID
1079 EFIAPI
1080 SmiRendezvous (
1081 IN UINTN CpuIndex
1082 )
1083 {
1084 EFI_STATUS Status;
1085 BOOLEAN ValidSmi;
1086 BOOLEAN IsBsp;
1087 BOOLEAN BspInProgress;
1088 UINTN Index;
1089 UINTN Cr2;
1090
1091 ASSERT(CpuIndex < mMaxNumberOfCpus);
1092
1093 //
1094 // Save Cr2 because Page Fault exception in SMM may override its value
1095 //
1096 Cr2 = AsmReadCr2 ();
1097
1098 //
1099 // Perform CPU specific entry hooks
1100 //
1101 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1102
1103 //
1104 // Determine if this is a valid SMI
1105 //
1106 ValidSmi = PlatformValidSmi();
1107
1108 //
1109 // Determine if the BSP is already in progress. Note this must be checked after
1110 // ValidSmi because BSP may clear a valid SMI source after checking in.
1111 //
1112 BspInProgress = *mSmmMpSyncData->InsideSmm;
1113
1114 if (!BspInProgress && !ValidSmi) {
1115 //
1116 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1117 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1118 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1119 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1120 // is nothing we need to do.
1121 //
1122 goto Exit;
1123 } else {
1124 //
1125 // Signal presence of this processor
1126 //
1127 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
1128 //
1129 // BSP has already ended the synchronization, so QUIT!!!
1130 //
1131
1132 //
1133 // Wait for BSP's signal to finish SMI
1134 //
1135 while (*mSmmMpSyncData->AllCpusInSync) {
1136 CpuPause ();
1137 }
1138 goto Exit;
1139 } else {
1140
1141 //
1142 // The BUSY lock is initialized to Released state.
1143 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1144 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1145 // after AP's present flag is detected.
1146 //
1147 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
1148 }
1149
1150 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1151 ActivateSmmProfile (CpuIndex);
1152 }
1153
1154 if (BspInProgress) {
1155 //
1156 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1157 // as BSP may have cleared the SMI status
1158 //
1159 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1160 } else {
1161 //
1162 // We have a valid SMI
1163 //
1164
1165 //
1166 // Elect BSP
1167 //
1168 IsBsp = FALSE;
1169 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1170 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1171 //
1172 // Call platform hook to do BSP election
1173 //
1174 Status = PlatformSmmBspElection (&IsBsp);
1175 if (EFI_SUCCESS == Status) {
1176 //
1177 // Platform hook determined the BSP election successfully
1178 //
1179 if (IsBsp) {
1180 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1181 }
1182 } else {
1183 //
1184 // Platform hook failed to determine the BSP; use the default BSP election method
1185 //
1186 InterlockedCompareExchange32 (
1187 (UINT32*)&mSmmMpSyncData->BspIndex,
1188 (UINT32)-1,
1189 (UINT32)CpuIndex
1190 );
1191 }
1192 }
1193 }
1194
1195 //
1196 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1197 //
1198 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1199
1200 //
1201 // Clear last request for SwitchBsp.
1202 //
1203 if (mSmmMpSyncData->SwitchBsp) {
1204 mSmmMpSyncData->SwitchBsp = FALSE;
1205 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1206 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1207 }
1208 }
1209
1210 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1211 SmmProfileRecordSmiNum ();
1212 }
1213
1214 //
1215 // BSP Handler is always called with a ValidSmi == TRUE
1216 //
1217 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1218 } else {
1219 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1220 }
1221 }
1222
1223 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1224
1225 //
1226 // Wait for BSP's signal to exit SMI
1227 //
1228 while (*mSmmMpSyncData->AllCpusInSync) {
1229 CpuPause ();
1230 }
1231 }
1232
1233 Exit:
1234 SmmCpuFeaturesRendezvousExit (CpuIndex);
1235 //
1236 // Restore Cr2
1237 //
1238 AsmWriteCr2 (Cr2);
1239 }
1240
1241 /**
1242 Allocate buffer for all semaphores and spin locks.
1243
1244 **/
1245 VOID
1246 InitializeSmmCpuSemaphores (
1247 VOID
1248 )
1249 {
1250 UINTN ProcessorCount;
1251 UINTN TotalSize;
1252 UINTN GlobalSemaphoresSize;
1253 UINTN CpuSemaphoresSize;
1254 UINTN MsrSemaphoreSize;
1255 UINTN SemaphoreSize;
1256 UINTN Pages;
1257 UINTN *SemaphoreBlock;
1258 UINTN SemaphoreAddr;
1259
1260 SemaphoreSize = GetSpinLockProperties ();
1261 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1262 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1263 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
1264 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
1265 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
1266 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1267 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
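//
// Worked example (illustrative): if GetSpinLockProperties() returns 64 and
// there are 4 CPUs, and assuming SMM_CPU_SEMAPHORE_GLOBAL holds exactly the
// six pointers assigned below, then GlobalSemaphoresSize = 6 * 64 = 384 bytes,
// CpuSemaphoresSize = 3 * 4 * 64 = 768 bytes (Busy/Run/Present per CPU), and
// MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * 64 bytes; every semaphore or
// lock gets its own aligned slot so that CPUs do not contend on a cache line.
//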
1268 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1269 SemaphoreBlock = AllocatePages (Pages);
1270 ASSERT (SemaphoreBlock != NULL);
1271 ZeroMem (SemaphoreBlock, TotalSize);
1272
1273 SemaphoreAddr = (UINTN)SemaphoreBlock;
1274 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1275 SemaphoreAddr += SemaphoreSize;
1276 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1277 SemaphoreAddr += SemaphoreSize;
1278 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1279 SemaphoreAddr += SemaphoreSize;
1280 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1281 SemaphoreAddr += SemaphoreSize;
1282 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1283 = (SPIN_LOCK *)SemaphoreAddr;
1284 SemaphoreAddr += SemaphoreSize;
1285 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
1286 = (SPIN_LOCK *)SemaphoreAddr;
1287
1288 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
1289 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
1290 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1291 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
1292 SemaphoreAddr += ProcessorCount * SemaphoreSize;
1293 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
1294
1295 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
1296 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
1297 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
1298 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
1299 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
1300
1301 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
1302 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
1303 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
1304
1305 mSemaphoreSize = SemaphoreSize;
1306 }
1307
1308 /**
1309 Initialize uncacheable data.
1310
1311 **/
1312 VOID
1313 EFIAPI
1314 InitializeMpSyncData (
1315 VOID
1316 )
1317 {
1318 UINTN CpuIndex;
1319
1320 if (mSmmMpSyncData != NULL) {
1321 //
1322 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1323 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1324 //
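// The resulting layout of the single allocation is:
//
//   [ SMM_DISPATCHER_MP_SYNC_DATA ][ CpuData[NumberOfCpus] ][ CandidateBsp[NumberOfCpus] ]
//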
1325 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1326 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1327 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1328 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1329 //
1330 // Enable BSP election by setting BspIndex to -1
1331 //
1332 mSmmMpSyncData->BspIndex = (UINT32)-1;
1333 }
1334 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
1335
1336 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
1337 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
1338 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
1339 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
1340 mSmmMpSyncData->AllCpusInSync != NULL);
1341 *mSmmMpSyncData->Counter = 0;
1342 *mSmmMpSyncData->InsideSmm = FALSE;
1343 *mSmmMpSyncData->AllCpusInSync = FALSE;
1344
1345 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
1346 mSmmMpSyncData->CpuData[CpuIndex].Busy =
1347 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
1348 mSmmMpSyncData->CpuData[CpuIndex].Run =
1349 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
1350 mSmmMpSyncData->CpuData[CpuIndex].Present =
1351 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
1352 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
1353 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
1354 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
1355 }
1356 }
1357 }
1358
1359 /**
1360 Initialize global data for MP synchronization.
1361
1362 @param Stacks Base address of SMI stack buffer for all processors.
1363 @param StackSize Stack size for each processor in SMM.
1364
@return The CR3 value (address of the SMM page table) for use by the SMI handlers.
1365 **/
1366 UINT32
1367 InitializeMpServiceData (
1368 IN VOID *Stacks,
1369 IN UINTN StackSize
1370 )
1371 {
1372 UINT32 Cr3;
1373 UINTN Index;
1374 UINT8 *GdtTssTables;
1375 UINTN GdtTableStepSize;
1376
1377 //
1378 // Allocate memory for all locks and semaphores
1379 //
1380 InitializeSmmCpuSemaphores ();
1381
1382 //
1383 // Initialize mSmmMpSyncData
1384 //
1385 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1386 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1387 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1388 ASSERT (mSmmMpSyncData != NULL);
1389 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
1390 InitializeMpSyncData ();
1391
1392 //
1393 // Initialize physical address mask
1394 // NOTE: Physical memory above virtual address limit is not supported !!!
1395 //
1396 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1397 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1398 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
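//
// Example (illustrative): if CPUID.80000008h reports 36 physical address
// bits, the first assignment yields gPhyMask = 0xFFFFFFFFF; the second line
// then clears bits 48 and above together with the low 12 page-offset bits,
// leaving gPhyMask = 0xFFFFFF000 as the mask applied to page table entries.
//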
1399
1400 //
1401 // Create page tables
1402 //
1403 Cr3 = SmmInitPageTable ();
1404
1405 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1406
1407 //
1408 // Install SMI handler for each CPU
1409 //
1410 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1411 InstallSmiHandler (
1412 Index,
1413 (UINT32)mCpuHotPlugData.SmBase[Index],
1414 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1415 StackSize,
1416 (UINTN)(GdtTssTables + GdtTableStepSize * Index),
1417 gcSmiGdtr.Limit + 1,
1418 gcSmiIdtr.Base,
1419 gcSmiIdtr.Limit + 1,
1420 Cr3
1421 );
1422 }
1423
1424 //
1425 // Record current MTRR settings
1426 //
1427 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
1428 MtrrGetAllMtrrs (&gSmiMtrrs);
1429
1430 return Cr3;
1431 }
1432
1433 /**
1434
1435 Register the SMM Foundation entry point.
1436
1437 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1438 @param SmmEntryPoint SMM Foundation EntryPoint
1439
1440 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
1441
1442 **/
1443 EFI_STATUS
1444 EFIAPI
1445 RegisterSmmEntry (
1446 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1447 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1448 )
1449 {
1450 //
1451 // Record the SMM Foundation EntryPoint; it is invoked later from the SMI entry vector.
1452 //
1453 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1454 return EFI_SUCCESS;
1455 }