/** @file
  SMM MP service implementation

  Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD License
  which accompanies this distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
//
// Slots for all MTRRs (FIXED MTRRs + VARIABLE MTRR base/mask pairs + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                                      gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}
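
//
// Usage sketch (illustrative only): WaitForSemaphore() and ReleaseSemaphore()
// form a counting semaphore over a plain UINT32. In this file they are
// typically paired on a per-CPU Run semaphore, e.g.:
//
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);  // signaler posts one unit
//   ...
//   WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);  // waiter blocks until a unit is posted
//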

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
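
//
// Note: locking a semaphore down to (UINT32)-1 also interlocks with
// ReleaseSemaphore() above: once the value is MAX_UINT32, the
// "Value + 1 != 0" check fails and ReleaseSemaphore() returns 0 without
// touching the semaphore. A late CPU checking in through
// ReleaseSemaphore (mSmmMpSyncData->Counter) in SmiRendezvous() therefore
// sees 0 and quits the SMI run.
//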

/**
  Waits for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}


/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, the CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed APs may have just come out of the delayed state. Blocked APs may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMIs may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check the SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex    Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR       *Psd;
  UINT64                         *SmiMtrrs;
  MTRR_SETTINGS                  *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs(BiosMtrr);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or we need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backing up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRRs to avoid a race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of the BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
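
//
// Handshake summary (illustrative only, derived from BSPHandler() above and
// APHandler() below, for the flow where SmmCpuFeaturesNeedConfigureMtrrs()
// returns TRUE):
//
//   BSP                                     each AP
//   SmmWaitForApArrival ()                  ReleaseSemaphore (BSP Run)   // arrival
//   WaitForAllAPs (ApCount)
//   ReleaseAllAPs ()                        WaitForSemaphore (own Run)
//   MtrrGetAllMtrrs (&Mtrrs)                MtrrGetAllMtrrs (&Mtrrs)     // backup
//   WaitForAllAPs (ApCount)                 ReleaseSemaphore (BSP Run)
//   ReleaseAllAPs ()                        WaitForSemaphore (own Run)
//   ReplaceOSMtrrs (CpuIndex)               ReplaceOSMtrrs (CpuIndex)    // program
//   WaitForAllAPs (ApCount)                 ReleaseSemaphore (BSP Run)
//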

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Wait for the BSP to check in, with timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time, with timeout
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM,
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}
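
//
// Resulting layout, for reference (derived from the code above): the first
// page holds the 4 page-directory-pointer entries (carrying PAE PDPTE
// attribute bits when Is32BitPageTable is TRUE), and the next 4 pages are
// page directories whose 4 x 512 entries identity-map 0-4GB as 2MB pages.
// When PcdCpuSmmStackGuard is set, the PagesNeeded extra pages split the
// 2MB regions covering the SMI stacks into 4KB page tables so that each
// stack's guard page can be marked non-present.
//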

/**
  Set memory cache ability.

  @param    PageTable              PageTable Address
  @param    Address                Memory Address to change cache ability
  @param    Cacheability           Cache ability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability with the
  // one being set, and break a 2M page entry into pieces only when they
  // disagree.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)(IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}
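
//
// Note on the PAT bit handling above: in a 2MB page directory entry the PAT
// bit is bit 12 (IA32_PG_PAT_2M), while in a 4KB page table entry it is
// bit 7 (IA32_PG_PAT_4K). When a 2MB entry with PAT set is split, the loop
// therefore moves the bit from position 12 to position 7 in each of the 512
// new 4KB entries before OR-ing in the per-page offset.
//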

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}
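
//
// Usage sketch (hypothetical caller, illustrative only; MyApProcedure and
// MyContext are placeholders, not part of this driver):
//
//   Status = SmmStartupThisAp (MyApProcedure, 1, &MyContext);
//   ASSERT_EFI_ERROR (Status);
//
// With PcdCpuSmmBlockStartupThisAp set, the call returns only after
// MyApProcedure has completed; otherwise completion can be observed through
// the target CPU's Busy spin lock, as BSPHandler() does before performing
// the remaining tasks.
//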

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS        Status;
  BOOLEAN           ValidSmi;
  BOOLEAN           IsBsp;
  BOOLEAN           BspInProgress;
  UINTN             Index;
  UINTN             Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled the ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases,
    // there is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determined the BSP successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
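
//
// Block layout, for reference (derived from the code above), where S is the
// size returned by GetSpinLockProperties() and N is ProcessorCount:
//
//   [global:  Counter | InsideSmm | AllCpusInSync | PFLock |
//             CodeAccessCheckLock | MemoryMappedLock]        each S bytes
//   [per-CPU: Busy x N | Run x N | Present x N]              each S bytes
//   [MSR spin locks: remainder of the allocation]
//
// Spacing the semaphores S bytes apart keeps each one in its own span
// (typically a cache line), so CPUs spinning on different semaphores do not
// contend.
//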

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}