1 /** @file
2 SMM MP service implementation
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 //
18 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
19 //
20 UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
21 UINT64 gPhyMask;
22 SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
23 UINTN mSmmMpSyncDataSize;
24 SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
25 UINTN mSemaphoreSize;
26
27 /**
28 Performs an atomic compare exchange operation to get the semaphore.
29 The compare exchange operation must be performed using
30 MP safe mechanisms.
31
32 @param Sem IN: 32-bit unsigned integer
33 OUT: original integer - 1
34 @return Original integer - 1
35
36 **/
37 UINT32
38 WaitForSemaphore (
39 IN OUT volatile UINT32 *Sem
40 )
41 {
42 UINT32 Value;
43
44 do {
45 Value = *Sem;
46 } while (Value == 0 ||
47 InterlockedCompareExchange32 (
48 (UINT32*)Sem,
49 Value,
50 Value - 1
51 ) != Value);
52 return Value - 1;
53 }
54
55
56 /**
57 Performs an atomic compare exchange operation to release the semaphore.
58 The compare exchange operation must be performed using
59 MP safe mechanisms.
60
61 @param Sem IN: 32-bit unsigned integer
62 OUT: original integer + 1
63 @return Original integer + 1
64
65 **/
66 UINT32
67 ReleaseSemaphore (
68 IN OUT volatile UINT32 *Sem
69 )
70 {
71 UINT32 Value;
72
73 do {
74 Value = *Sem;
75 } while (Value + 1 != 0 &&
76 InterlockedCompareExchange32 (
77 (UINT32*)Sem,
78 Value,
79 Value + 1
80 ) != Value);
81 return Value + 1;
82 }
83
84 /**
85 Performs an atomic compare exchange operation to lock the semaphore.
86 The compare exchange operation must be performed using
87 MP safe mechanisms.
88
89 @param Sem IN: 32-bit unsigned integer
90 OUT: -1
91 @return Original integer
92
93 **/
94 UINT32
95 LockdownSemaphore (
96 IN OUT volatile UINT32 *Sem
97 )
98 {
99 UINT32 Value;
100
101 do {
102 Value = *Sem;
103 } while (InterlockedCompareExchange32 (
104 (UINT32*)Sem,
105 Value, (UINT32)-1
106 ) != Value);
107 return Value;
108 }
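
//
// How the three primitives above are typically paired in this file (illustrative
// sketch only):
//
//   Counter            - counting semaphore incremented by each processor on
//                        arrival; LockdownSemaphore() forces it to -1 so that
//                        late arrivals can no longer check in.
//   CpuData[Index].Run - per-processor handshake semaphore:
//
//       BSP                                 AP (Index)
//       ---                                 ----------
//       ReleaseAllAPs ();                   WaitForSemaphore (&CpuData[Index].Run);
//       WaitForAllAPs (ApCount);            ReleaseSemaphore (&CpuData[BspIndex].Run);
//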
109
110 /**
111 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
112
113 @param NumberOfAPs Number of APs to wait for
114
115 **/
116 VOID
117 WaitForAllAPs (
118 IN UINTN NumberOfAPs
119 )
120 {
121 UINTN BspIndex;
122
123 BspIndex = mSmmMpSyncData->BspIndex;
124 while (NumberOfAPs-- > 0) {
125 WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
126 }
127 }
128
129 /**
130 Performs an atomic compare exchange operation to release the semaphore
131 for each AP.
132
133 **/
134 VOID
135 ReleaseAllAPs (
136 VOID
137 )
138 {
139 UINTN Index;
140 UINTN BspIndex;
141
142 BspIndex = mSmmMpSyncData->BspIndex;
143 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
144 if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
145 ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
146 }
147 }
148 }
149
150 /**
151 Checks if all CPUs (with certain exceptions) have checked in for this SMI run.
152
153 @param Exceptions CPU arrival exception flags.
154
155 @retval TRUE if all CPUs have checked in.
156 @retval FALSE if at least one Normal AP hasn't checked in.
157
158 **/
159 BOOLEAN
160 AllCpusInSmmWithExceptions (
161 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
162 )
163 {
164 UINTN Index;
165 SMM_CPU_DATA_BLOCK *CpuData;
166 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
167
168 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
169
170 if (mSmmMpSyncData->Counter == mNumberOfCpus) {
171 return TRUE;
172 }
173
174 CpuData = mSmmMpSyncData->CpuData;
175 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
176 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
177 if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
178 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
179 continue;
180 }
181 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
182 continue;
183 }
184 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
185 continue;
186 }
187 return FALSE;
188 }
189 }
190
191
192 return TRUE;
193 }
194
195
196 /**
197 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
198 entering SMM, except for SMI-disabled APs.
199
200 **/
201 VOID
202 SmmWaitForApArrival (
203 VOID
204 )
205 {
206 UINT64 Timer;
207 UINTN Index;
208
209 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
210
211 //
212 // The platform implementor should choose a timeout value appropriately:
213 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note
214 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
215 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
216 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
217 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
218 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
219 // - The timeout value must be longer than the longest possible IO operation in the system.
220 //
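//
// In this driver the timeout is implemented by StartSyncTimer()/IsSyncTimerTimeout();
// the actual value is typically taken from PcdCpuSmmApSyncTimeout, so platforms
// tune that PCD rather than this code.
//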
221
222 //
223 // Sync with APs 1st timeout
224 //
225 for (Timer = StartSyncTimer ();
226 !IsSyncTimerTimeout (Timer) &&
227 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
228 ) {
229 CpuPause ();
230 }
231
232 //
233 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
234 // because:
235 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
236 // normal mode code. These APs need to be guaranteed to have an SMI pending, to ensure that once they are out of the delayed / blocked state, they
237 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode
238 // work while SMI handling is on-going.
239 // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
240 // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in the SMI-disabled state
241 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
242 // mode work while SMI handling is on-going.
243 // d) We don't add code to check the SMI disabling status to skip sending the IPI to SMI-disabled APs, because:
244 // - In the traditional flow, SMI disabling is discouraged.
245 // - In the relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
246 // In both cases, adding SMI-disabling checking code increases overhead.
247 //
248 if (mSmmMpSyncData->Counter < mNumberOfCpus) {
249 //
250 // Send SMI IPIs to bring outside processors in
251 //
252 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
253 if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
254 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
255 }
256 }
257
258 //
259 // Sync with APs 2nd timeout.
260 //
261 for (Timer = StartSyncTimer ();
262 !IsSyncTimerTimeout (Timer) &&
263 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
264 ) {
265 CpuPause ();
266 }
267 }
268
269 return;
270 }
271
272
273 /**
274 Replace OS MTRRs with SMI MTRRs.
275
276 @param CpuIndex Processor Index
277
278 **/
279 VOID
280 ReplaceOSMtrrs (
281 IN UINTN CpuIndex
282 )
283 {
284 PROCESSOR_SMM_DESCRIPTOR *Psd;
285 UINT64 *SmiMtrrs;
286 MTRR_SETTINGS *BiosMtrr;
287
288 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
289 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
290
291 SmmCpuFeaturesDisableSmrr ();
292
293 //
294 // Replace all MTRR registers
295 //
296 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
297 MtrrSetAllMtrrs(BiosMtrr);
298 }
299
300 /**
301 SMI handler for BSP.
302
303 @param CpuIndex BSP processor Index
304 @param SyncMode SMM MP sync mode
305
306 **/
307 VOID
308 BSPHandler (
309 IN UINTN CpuIndex,
310 IN SMM_CPU_SYNC_MODE SyncMode
311 )
312 {
313 UINTN Index;
314 MTRR_SETTINGS Mtrrs;
315 UINTN ApCount;
316 BOOLEAN ClearTopLevelSmiResult;
317 UINTN PresentCount;
318
319 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
320 ApCount = 0;
321
322 //
323 // Flag BSP's presence
324 //
325 mSmmMpSyncData->InsideSmm = TRUE;
326
327 //
328 // Initialize Debug Agent to start source level debug in BSP handler
329 //
330 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
331
332 //
333 // Mark this processor's presence
334 //
335 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
336
337 //
338 // Clear platform top level SMI status bit before calling SMI handlers. If
339 // we cleared it after SMI handlers are run, we would miss the SMI that
340 // occurs after SMI handlers are done and before SMI status bit is cleared.
341 //
342 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
343 ASSERT (ClearTopLevelSmiResult == TRUE);
344
345 //
346 // Set running processor index
347 //
348 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
349
350 //
351 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
352 //
353 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
354
355 //
356 // Wait for APs to arrive
357 //
358 SmmWaitForApArrival();
359
360 //
361 // Lock the counter down and retrieve the number of APs
362 //
363 mSmmMpSyncData->AllCpusInSync = TRUE;
364 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
365
366 //
367 // Wait for all APs to get ready for programming MTRRs
368 //
369 WaitForAllAPs (ApCount);
370
371 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
372 //
373 // Signal all APs it's time for backup MTRRs
374 //
375 ReleaseAllAPs ();
376
377 //
378 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
379 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
380 // to a large enough value to avoid this situation.
381 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
382 // We do the backup first and then set MTRR to avoid race condition for threads
383 // in the same core.
384 //
385 MtrrGetAllMtrrs(&Mtrrs);
386
387 //
388 // Wait for all APs to complete their MTRR saving
389 //
390 WaitForAllAPs (ApCount);
391
392 //
393 // Let all processors program SMM MTRRs together
394 //
395 ReleaseAllAPs ();
396
397 //
398 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
399 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
400 // to a large enough value to avoid this situation.
401 //
402 ReplaceOSMtrrs (CpuIndex);
403
404 //
405 // Wait for all APs to complete their MTRR programming
406 //
407 WaitForAllAPs (ApCount);
408 }
409 }
410
411 //
412 // The BUSY lock is initialized to Acquired state
413 //
414 AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
415
416 //
417 // Perform the pre tasks
418 //
419 PerformPreTasks ();
420
421 //
422 // Invoke SMM Foundation EntryPoint with the processor information context.
423 //
424 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
425
426 //
427 // Make sure all APs have completed their pending non-blocking tasks
428 //
429 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
430 if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
431 AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
432 ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
433 }
434 }
435
436 //
437 // Perform the remaining tasks
438 //
439 PerformRemainingTasks ();
440
441 //
442 // If Relaxed-AP Sync Mode: gather all available APs after the BSP's SMM handlers are done, and
443 // make those APs exit SMI synchronously. APs which arrive later will be excluded and
444 // will run through freely.
445 //
446 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
447
448 //
449 // Lock the counter down and retrieve the number of APs
450 //
451 mSmmMpSyncData->AllCpusInSync = TRUE;
452 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
453 //
454 // Make sure all APs have their Present flag set
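// (The arrival Counter is incremented before an AP sets its Present flag, so
// spin until the number of Present processors, which includes the BSP, exceeds
// the locked-down AP count.)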
455 //
456 while (TRUE) {
457 PresentCount = 0;
458 for (Index = mMaxNumberOfCpus; Index-- > 0;) {
459 if (mSmmMpSyncData->CpuData[Index].Present) {
460 PresentCount ++;
461 }
462 }
463 if (PresentCount > ApCount) {
464 break;
465 }
466 }
467 }
468
469 //
470 // Notify all APs to exit
471 //
472 mSmmMpSyncData->InsideSmm = FALSE;
473 ReleaseAllAPs ();
474
475 //
476 // Wait for all APs to complete their pending tasks
477 //
478 WaitForAllAPs (ApCount);
479
480 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
481 //
482 // Signal APs to restore MTRRs
483 //
484 ReleaseAllAPs ();
485
486 //
487 // Restore OS MTRRs
488 //
489 SmmCpuFeaturesReenableSmrr ();
490 MtrrSetAllMtrrs(&Mtrrs);
491
492 //
493 // Wait for all APs to complete MTRR programming
494 //
495 WaitForAllAPs (ApCount);
496 }
497
498 //
499 // Stop source level debug in BSP handler, the code below will not be
500 // debugged.
501 //
502 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
503
504 //
505 // Signal APs to Reset states/semaphore for this processor
506 //
507 ReleaseAllAPs ();
508
509 //
510 // Perform pending operations for hot-plug
511 //
512 SmmCpuUpdate ();
513
514 //
515 // Clear the Present flag of BSP
516 //
517 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
518
519 //
520 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
521 // WaitForAllAPs() does not depend on the Present flag.
522 //
523 WaitForAllAPs (ApCount);
524
525 //
526 // Reset BspIndex to -1, meaning BSP has not been elected.
527 //
528 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
529 mSmmMpSyncData->BspIndex = (UINT32)-1;
530 }
531
532 //
533 // Allow APs to check in from this point on
534 //
535 mSmmMpSyncData->Counter = 0;
536 mSmmMpSyncData->AllCpusInSync = FALSE;
537 }
538
539 /**
540 SMI handler for AP.
541
542 @param CpuIndex AP processor Index.
543 @param ValidSmi Indicates whether the current SMI is a valid SMI.
544 @param SyncMode SMM MP sync mode.
545
546 **/
547 VOID
548 APHandler (
549 IN UINTN CpuIndex,
550 IN BOOLEAN ValidSmi,
551 IN SMM_CPU_SYNC_MODE SyncMode
552 )
553 {
554 UINT64 Timer;
555 UINTN BspIndex;
556 MTRR_SETTINGS Mtrrs;
557
558 //
559 // Wait for the BSP's arrival, with a timeout
560 //
561 for (Timer = StartSyncTimer ();
562 !IsSyncTimerTimeout (Timer) &&
563 !mSmmMpSyncData->InsideSmm;
564 ) {
565 CpuPause ();
566 }
567
568 if (!mSmmMpSyncData->InsideSmm) {
569 //
570 // The BSP timed out in the first round
571 //
572 if (mSmmMpSyncData->BspIndex != -1) {
573 //
574 // BSP Index is known
575 //
576 BspIndex = mSmmMpSyncData->BspIndex;
577 ASSERT (CpuIndex != BspIndex);
578
579 //
580 // Send SMI IPI to bring BSP in
581 //
582 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
583
584 //
585 // Now wait for the BSP a 2nd time
586 //
587 for (Timer = StartSyncTimer ();
588 !IsSyncTimerTimeout (Timer) &&
589 !mSmmMpSyncData->InsideSmm;
590 ) {
591 CpuPause ();
592 }
593
594 if (!mSmmMpSyncData->InsideSmm) {
595 //
596 // Give up since BSP is unable to enter SMM
597 // and signal the completion of this AP
598 WaitForSemaphore (&mSmmMpSyncData->Counter);
599 return;
600 }
601 } else {
602 //
603 // Don't know BSP index. Give up without sending IPI to BSP.
604 //
605 WaitForSemaphore (&mSmmMpSyncData->Counter);
606 return;
607 }
608 }
609
610 //
611 // BSP is available
612 //
613 BspIndex = mSmmMpSyncData->BspIndex;
614 ASSERT (CpuIndex != BspIndex);
615
616 //
617 // Mark this processor's presence
618 //
619 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
620
621 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
622 //
623 // Notify BSP of arrival at this point
624 //
625 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
626 }
627
628 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
629 //
630 // Wait for the signal from BSP to backup MTRRs
631 //
632 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
633
634 //
635 // Backup OS MTRRs
636 //
637 MtrrGetAllMtrrs(&Mtrrs);
638
639 //
640 // Signal the BSP that this AP has finished backing up its MTRRs
641 //
642 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
643
644 //
645 // Wait for BSP's signal to program MTRRs
646 //
647 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
648
649 //
650 // Replace OS MTRRs with SMI MTRRs
651 //
652 ReplaceOSMtrrs (CpuIndex);
653
654 //
655 // Signal the BSP that this AP has finished programming its MTRRs
656 //
657 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
658 }
659
660 while (TRUE) {
661 //
662 // Wait for something to happen
663 //
664 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
665
666 //
667 // Check if BSP wants to exit SMM
668 //
669 if (!mSmmMpSyncData->InsideSmm) {
670 break;
671 }
672
673 //
674 // BUSY should be acquired by SmmStartupThisAp()
675 //
676 ASSERT (
677 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
678 );
679
680 //
681 // Invoke the scheduled procedure
682 //
683 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
684 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
685 );
686
687 //
688 // Release BUSY
689 //
690 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
691 }
692
693 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
694 //
695 // Notify the BSP that this AP is ready to program MTRRs
696 //
697 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
698
699 //
700 // Wait for the signal from BSP to program MTRRs
701 //
702 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
703
704 //
705 // Restore OS MTRRs
706 //
707 SmmCpuFeaturesReenableSmrr ();
708 MtrrSetAllMtrrs(&Mtrrs);
709 }
710
711 //
712 // Notify the BSP that this AP is ready to reset the states/semaphores for this processor
713 //
714 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
715
716 //
717 // Wait for the signal from BSP to Reset states/semaphore for this processor
718 //
719 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
720
721 //
722 // Reset states/semaphore for this processor
723 //
724 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
725
726 //
727 // Notify the BSP that this AP is ready to exit SMM
728 //
729 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
730
731 }
732
733 /**
734 Create 4G PageTable in SMRAM.
735
736 @param ExtraPages Number of additional pages to allocate in front of the 4G page table
737 @param Is32BitPageTable Whether the page table is 32-bit PAE
738 @return PageTable Address
739
740 **/
741 UINT32
742 Gen4GPageTable (
743 IN UINTN ExtraPages,
744 IN BOOLEAN Is32BitPageTable
745 )
746 {
747 VOID *PageTable;
748 UINTN Index;
749 UINT64 *Pte;
750 UINTN PagesNeeded;
751 UINTN Low2MBoundary;
752 UINTN High2MBoundary;
753 UINTN Pages;
754 UINTN GuardPage;
755 UINT64 *Pdpte;
756 UINTN PageIndex;
757 UINTN PageAddress;
758
759 Low2MBoundary = 0;
760 High2MBoundary = 0;
761 PagesNeeded = 0;
762 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
763 //
764 // Add one more page for known good stack, then find the lower 2MB aligned address.
765 //
766 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
767 //
768 // Add two more pages for known good stack and stack guard page,
769 // then find the lower 2MB aligned address.
770 //
771 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
772 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
773 }
774 //
775 // Allocate the page table
776 //
777 PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
778 ASSERT (PageTable != NULL);
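//
// Layout of the allocation (low to high):
//   ExtraPages caller-reserved pages | 1 PDPT page | 4 page-directory pages |
//   PagesNeeded page-table pages used for the stack-guard mapping
// PageTable is advanced below so that it points at the PDPT page.
//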
779
780 PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
781 Pte = (UINT64*)PageTable;
782
783 //
784 // Zero out all page table entries first
785 //
786 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
787
788 //
789 // Set Page Directory Pointers
790 //
791 for (Index = 0; Index < 4; Index++) {
792 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
793 }
794 Pte += EFI_PAGE_SIZE / sizeof (*Pte);
795
796 //
797 // Fill in Page Directory Entries
798 //
799 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
800 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
801 }
802
803 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
804 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
805 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
806 Pdpte = (UINT64*)PageTable;
807 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
808 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
809 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
810 //
811 // Fill in Page Table Entries
812 //
813 Pte = (UINT64*)Pages;
814 PageAddress = PageIndex;
815 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
816 if (PageAddress == GuardPage) {
817 //
818 // Mark the guard page as non-present
819 //
820 Pte[Index] = PageAddress;
821 GuardPage += mSmmStackSize;
822 if (GuardPage > mSmmStackArrayEnd) {
823 GuardPage = 0;
824 }
825 } else {
826 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
827 }
828 PageAddress+= EFI_PAGE_SIZE;
829 }
830 Pages += EFI_PAGE_SIZE;
831 }
832 }
833
834 return (UINT32)(UINTN)PageTable;
835 }
836
837 /**
838 Set memory cacheability.
839
840 @param PageTable Page table address
841 @param Address Memory address whose cacheability is to be changed
842 @param Cacheability Cacheability to set
843
844 **/
845 VOID
846 SetCacheability (
847 IN UINT64 *PageTable,
848 IN UINTN Address,
849 IN UINT8 Cacheability
850 )
851 {
852 UINTN PTIndex;
853 VOID *NewPageTableAddress;
854 UINT64 *NewPageTable;
855 UINTN Index;
856
857 ASSERT ((Address & EFI_PAGE_MASK) == 0);
858
859 if (sizeof (UINTN) == sizeof (UINT64)) {
860 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
861 ASSERT (PageTable[PTIndex] & IA32_PG_P);
862 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
863 }
864
865 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
866 ASSERT (PageTable[PTIndex] & IA32_PG_P);
867 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
868
869 //
870 // A perfect implementation would compare the original cacheability with the
871 // one being set, and break a 2M page entry into pieces only when they
872 // disagree.
873 //
874 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
875 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
876 //
877 // Allocate a page from SMRAM
878 //
879 NewPageTableAddress = AllocatePageTableMemory (1);
880 ASSERT (NewPageTableAddress != NULL);
881
882 NewPageTable = (UINT64 *)NewPageTableAddress;
883
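//
// Split the 2MB entry into 512 4KB entries that inherit the original
// attributes: move the PAT bit from its 2MB position (IA32_PG_PAT_2M) to its
// 4KB position (IA32_PG_PAT_4K) and give each entry its own 4KB-aligned offset.
//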
884 for (Index = 0; Index < 0x200; Index++) {
885 NewPageTable[Index] = PageTable[PTIndex];
886 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
887 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
888 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
889 }
890 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
891 }
892
893 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
894 }
895
896 ASSERT (PageTable[PTIndex] & IA32_PG_P);
897 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
898
899 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
900 ASSERT (PageTable[PTIndex] & IA32_PG_P);
901 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
902 PageTable[PTIndex] |= (UINT64)Cacheability;
903 }
904
905
906 /**
907 Schedule a procedure to run on the specified CPU.
908
909 @param Procedure The address of the procedure to run
910 @param CpuIndex Target CPU Index
911 @param ProcArguments The parameter to pass to the procedure
912
913 @retval EFI_INVALID_PARAMETER CpuIndex not valid
914 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
915 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
916 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
917 @retval EFI_SUCCESS The procedure has been successfully scheduled
918
919 **/
920 EFI_STATUS
921 EFIAPI
922 SmmStartupThisAp (
923 IN EFI_AP_PROCEDURE Procedure,
924 IN UINTN CpuIndex,
925 IN OUT VOID *ProcArguments OPTIONAL
926 )
927 {
928 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
929 CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
930 !mSmmMpSyncData->CpuData[CpuIndex].Present ||
931 gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
932 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
933 return EFI_INVALID_PARAMETER;
934 }
935
936 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
937 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
938 ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
939
940 if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
941 AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
942 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
943 }
944 return EFI_SUCCESS;
945 }
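
//
// Typical call site (illustrative sketch only; MyApProcedure and MyContext are
// hypothetical names):
//
//   Status = gSmst->SmmStartupThisAp (MyApProcedure, CpuIndex, &MyContext);
//
// When PcdCpuSmmBlockStartupThisAp is TRUE, the call only returns after the
// procedure has finished, because the Busy spin lock is held until the AP
// releases it.
//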
946
947 /**
948 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
949 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.
950
951 NOTE: It might not be appropriate at runtime since it might
952 conflict with OS debugging facilities. Turn them off in RELEASE builds.
953
954 @param CpuIndex CPU Index
955
956 **/
957 VOID
958 EFIAPI
959 CpuSmmDebugEntry (
960 IN UINTN CpuIndex
961 )
962 {
963 SMRAM_SAVE_STATE_MAP *CpuSaveState;
964
965 if (FeaturePcdGet (PcdCpuSmmDebug)) {
966 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
967 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
968 AsmWriteDr6 (CpuSaveState->x86._DR6);
969 AsmWriteDr7 (CpuSaveState->x86._DR7);
970 } else {
971 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
972 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
973 }
974 }
975 }
976
977 /**
978 This function restores DR6 & DR7 to SMM save state.
979
980 NOTE: It might not be appropriate at runtime since it might
981 conflict with OS debugging facilities. Turn them off in RELEASE builds.
982
983 @param CpuIndex CPU Index
984
985 **/
986 VOID
987 EFIAPI
988 CpuSmmDebugExit (
989 IN UINTN CpuIndex
990 )
991 {
992 SMRAM_SAVE_STATE_MAP *CpuSaveState;
993
994 if (FeaturePcdGet (PcdCpuSmmDebug)) {
995 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
996 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
997 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
998 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
999 } else {
1000 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1001 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1002 }
1003 }
1004 }
1005
1006 /**
1007 C function for SMI entry; each processor comes here upon an SMI trigger.
1008
1009 @param CpuIndex CPU Index
1010
1011 **/
1012 VOID
1013 EFIAPI
1014 SmiRendezvous (
1015 IN UINTN CpuIndex
1016 )
1017 {
1018 EFI_STATUS Status;
1019 BOOLEAN ValidSmi;
1020 BOOLEAN IsBsp;
1021 BOOLEAN BspInProgress;
1022 UINTN Index;
1023 UINTN Cr2;
1024 BOOLEAN XdDisableFlag;
1025 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;
1026
1027 //
1028 // Save Cr2 because Page Fault exception in SMM may override its value
1029 //
1030 Cr2 = AsmReadCr2 ();
1031
1032 //
1033 // Perform CPU specific entry hooks
1034 //
1035 SmmCpuFeaturesRendezvousEntry (CpuIndex);
1036
1037 //
1038 // Determine if this is a valid SMI
1039 //
1040 ValidSmi = PlatformValidSmi();
1041
1042 //
1043 // Determine if the BSP is already in progress. Note this must be checked after
1044 // ValidSmi because the BSP may clear a valid SMI source after checking in.
1045 //
1046 BspInProgress = mSmmMpSyncData->InsideSmm;
1047
1048 if (!BspInProgress && !ValidSmi) {
1049 //
1050 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1051 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1052 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1053 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1054 // is nothing we need to do.
1055 //
1056 goto Exit;
1057 } else {
1058 //
1059 // Signal presence of this processor
1060 //
1061 if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
1062 //
1063 // BSP has already ended the synchronization, so QUIT!!!
1064 //
1065
1066 //
1067 // Wait for BSP's signal to finish SMI
1068 //
1069 while (mSmmMpSyncData->AllCpusInSync) {
1070 CpuPause ();
1071 }
1072 goto Exit;
1073 } else {
1074
1075 //
1076 // The BUSY lock is initialized to Released state.
1077 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1078 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1079 // after AP's present flag is detected.
1080 //
1081 InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
1082 }
1083
1084 //
1085 // Try to enable XD
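// (A set "XD Bit Disable" flag in MSR_IA32_MISC_ENABLE means the OS has turned
// the XD feature off; clear it for the duration of this SMI and restore it in
// the "Restore XD" step below.)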
1086 //
1087 XdDisableFlag = FALSE;
1088 if (mXdSupported) {
1089 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1090 if (MiscEnableMsr.Bits.XD == 1) {
1091 XdDisableFlag = TRUE;
1092 MiscEnableMsr.Bits.XD = 0;
1093 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
1094 }
1095 ActivateXd ();
1096 }
1097
1098 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1099 ActivateSmmProfile (CpuIndex);
1100 }
1101
1102 if (BspInProgress) {
1103 //
1104 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1105 // as BSP may have cleared the SMI status
1106 //
1107 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1108 } else {
1109 //
1110 // We have a valid SMI
1111 //
1112
1113 //
1114 // Elect BSP
1115 //
1116 IsBsp = FALSE;
1117 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1118 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
1119 //
1120 // Call platform hook to do BSP election
1121 //
1122 Status = PlatformSmmBspElection (&IsBsp);
1123 if (EFI_SUCCESS == Status) {
1124 //
1125 // Platform hook made the determination successfully
1126 //
1127 if (IsBsp) {
1128 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
1129 }
1130 } else {
1131 //
1132 // Platform hook failed to determine; use the default BSP election method
1133 //
1134 InterlockedCompareExchange32 (
1135 (UINT32*)&mSmmMpSyncData->BspIndex,
1136 (UINT32)-1,
1137 (UINT32)CpuIndex
1138 );
1139 }
1140 }
1141 }
1142
1143 //
1144 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1145 //
1146 if (mSmmMpSyncData->BspIndex == CpuIndex) {
1147
1148 //
1149 // Clear last request for SwitchBsp.
1150 //
1151 if (mSmmMpSyncData->SwitchBsp) {
1152 mSmmMpSyncData->SwitchBsp = FALSE;
1153 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1154 mSmmMpSyncData->CandidateBsp[Index] = FALSE;
1155 }
1156 }
1157
1158 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1159 SmmProfileRecordSmiNum ();
1160 }
1161
1162 //
1163 // BSP Handler is always called with a ValidSmi == TRUE
1164 //
1165 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
1166 } else {
1167 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
1168 }
1169 }
1170
1171 ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
1172
1173 //
1174 // Wait for BSP's signal to exit SMI
1175 //
1176 while (mSmmMpSyncData->AllCpusInSync) {
1177 CpuPause ();
1178 }
1179
1180 //
1181 // Restore XD
1182 //
1183 if (XdDisableFlag) {
1184 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1185 MiscEnableMsr.Bits.XD = 1;
1186 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
1187 }
1188 }
1189
1190 Exit:
1191 SmmCpuFeaturesRendezvousExit (CpuIndex);
1192 //
1193 // Restore Cr2
1194 //
1195 AsmWriteCr2 (Cr2);
1196 }
1197
1198 /**
1199 Allocate buffer for all semaphores and spin locks.
1200
1201 **/
1202 VOID
1203 InitializeSmmCpuSemaphores (
1204 VOID
1205 )
1206 {
1207 UINTN ProcessorCount;
1208 UINTN TotalSize;
1209 UINTN GlobalSemaphoresSize;
1210 UINTN SemaphoreSize;
1211 UINTN Pages;
1212 UINTN *SemaphoreBlock;
1213 UINTN SemaphoreAddr;
1214
1215 SemaphoreSize = GetSpinLockProperties ();
1216 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1217 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
1218 TotalSize = GlobalSemaphoresSize;
1219 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
1220 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
1221 Pages = EFI_SIZE_TO_PAGES (TotalSize);
1222 SemaphoreBlock = AllocatePages (Pages);
1223 ASSERT (SemaphoreBlock != NULL);
1224 ZeroMem (SemaphoreBlock, TotalSize);
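//
// Carve the block into SemaphoreSize-byte slots, one per global semaphore.
// GetSpinLockProperties() returns the recommended spin-lock alignment (commonly
// one cache line), so each semaphore gets its own slot and spinning processors
// do not false-share cache lines.
//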
1225
1226 SemaphoreAddr = (UINTN)SemaphoreBlock;
1227 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
1228 SemaphoreAddr += SemaphoreSize;
1229 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
1230 SemaphoreAddr += SemaphoreSize;
1231 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
1232 SemaphoreAddr += SemaphoreSize;
1233 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
1234 SemaphoreAddr += SemaphoreSize;
1235 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
1236 = (SPIN_LOCK *)SemaphoreAddr;
1237
1238 mSemaphoreSize = SemaphoreSize;
1239 }
1240
1241 /**
1242 Initialize un-cacheable data.
1243
1244 **/
1245 VOID
1246 EFIAPI
1247 InitializeMpSyncData (
1248 VOID
1249 )
1250 {
1251 if (mSmmMpSyncData != NULL) {
1252 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
1253 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
1254 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
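//
// The buffer laid out above is one contiguous allocation:
//   SMM_DISPATCHER_MP_SYNC_DATA header, followed by NumberOfCpus
//   SMM_CPU_DATA_BLOCK entries (CpuData), followed by NumberOfCpus
//   BOOLEAN entries (CandidateBsp). See mSmmMpSyncDataSize in
//   InitializeMpServiceData().
//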
1255 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
1256 //
1257 // Enable BSP election by setting BspIndex to -1
1258 //
1259 mSmmMpSyncData->BspIndex = (UINT32)-1;
1260 }
1261 mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
1262
1263 InitializeSmmCpuSemaphores ();
1264 }
1265 }
1266
1267 /**
1268 Initialize global data for MP synchronization.
1269
1270 @param Stacks Base address of SMI stack buffer for all processors.
1271 @param StackSize Stack size for each processor in SMM.
1272
1273 **/
1274 UINT32
1275 InitializeMpServiceData (
1276 IN VOID *Stacks,
1277 IN UINTN StackSize
1278 )
1279 {
1280 UINT32 Cr3;
1281 UINTN Index;
1282 MTRR_SETTINGS *Mtrr;
1283 PROCESSOR_SMM_DESCRIPTOR *Psd;
1284 UINT8 *GdtTssTables;
1285 UINTN GdtTableStepSize;
1286
1287 //
1288 // Initialize mSmmMpSyncData
1289 //
1290 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
1291 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
1292 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
1293 ASSERT (mSmmMpSyncData != NULL);
1294 InitializeMpSyncData ();
1295
1296 //
1297 // Initialize physical address mask
1298 // NOTE: Physical memory above virtual address limit is not supported !!!
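// CPUID leaf 0x80000008 reports the physical address width in EAX[7:0]; the
// resulting mask is additionally clamped to 48 bits and page-aligned below.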
1299 //
1300 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
1301 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
1302 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
1303
1304 //
1305 // Create page tables
1306 //
1307 Cr3 = SmmInitPageTable ();
1308
1309 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
1310
1311 //
1312 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
1313 //
1314 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1315 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
1316 CopyMem (Psd, &gcPsd, sizeof (gcPsd));
1317 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
1318 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
1319
1320 //
1321 // Install SMI handler
1322 //
1323 InstallSmiHandler (
1324 Index,
1325 (UINT32)mCpuHotPlugData.SmBase[Index],
1326 (VOID*)((UINTN)Stacks + (StackSize * Index)),
1327 StackSize,
1328 (UINTN)Psd->SmmGdtPtr,
1329 Psd->SmmGdtSize,
1330 gcSmiIdtr.Base,
1331 gcSmiIdtr.Limit + 1,
1332 Cr3
1333 );
1334 }
1335
1336 //
1337 // Record current MTRR settings
1338 //
1339 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
1340 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
1341 MtrrGetAllMtrrs (Mtrr);
1342
1343 return Cr3;
1344 }
1345
1346 /**
1347
1348 Register the SMM Foundation entry point.
1349
1350 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1351 @param SmmEntryPoint SMM Foundation EntryPoint
1352
1353 @retval EFI_SUCCESS The SMM Foundation entry point was successfully registered
1354
1355 **/
1356 EFI_STATUS
1357 EFIAPI
1358 RegisterSmmEntry (
1359 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
1360 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1361 )
1362 {
1363 //
1364 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1365 //
1366 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
1367 return EFI_SUCCESS;
1368 }