/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
18 // Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
20 UINT64 gSmiMtrrs
[MTRR_NUMBER_OF_FIXED_MTRR
+ 2 * MTRR_NUMBER_OF_VARIABLE_MTRR
+ 1];
22 SMM_DISPATCHER_MP_SYNC_DATA
*mSmmMpSyncData
= NULL
;
23 UINTN mSmmMpSyncDataSize
;
24 SMM_CPU_SEMAPHORES mSmmCpuSemaphores
;
26 SPIN_LOCK
*mPFLock
= NULL
;
27 SMM_CPU_SYNC_MODE mCpuSmmSyncMode
;
30 Performs an atomic compare exchange operation to get semaphore.
31 The compare exchange operation must be performed using
34 @param Sem IN: 32-bit unsigned integer
35 OUT: original integer - 1
36 @return Original integer - 1
41 IN OUT
volatile UINT32
*Sem
48 } while (Value
== 0 ||
49 InterlockedCompareExchange32 (
59 Performs an atomic compare exchange operation to release semaphore.
60 The compare exchange operation must be performed using
63 @param Sem IN: 32-bit unsigned integer
64 OUT: original integer + 1
65 @return Original integer + 1
70 IN OUT
volatile UINT32
*Sem
77 } while (Value
+ 1 != 0 &&
78 InterlockedCompareExchange32 (
87 Performs an atomic compare exchange operation to lock semaphore.
88 The compare exchange operation must be performed using
91 @param Sem IN: 32-bit unsigned integer
93 @return Original integer
98 IN OUT
volatile UINT32
*Sem
105 } while (InterlockedCompareExchange32 (
113 Wait all APs to performs an atomic compare exchange operation to release semaphore.
115 @param NumberOfAPs AP number
125 BspIndex
= mSmmMpSyncData
->BspIndex
;
126 while (NumberOfAPs
-- > 0) {
127 WaitForSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
132 Performs an atomic compare exchange operation to release semaphore
144 BspIndex
= mSmmMpSyncData
->BspIndex
;
145 for (Index
= mMaxNumberOfCpus
; Index
-- > 0;) {
146 if (Index
!= BspIndex
&& *(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
147 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[Index
].Run
);
153 Checks if all CPUs (with certain exceptions) have checked in for this SMI run
155 @param Exceptions CPU Arrival exception flags.
157 @retval TRUE if all CPUs the have checked in.
158 @retval FALSE if at least one Normal AP hasn't checked in.
162 AllCpusInSmmWithExceptions (
163 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
167 SMM_CPU_DATA_BLOCK
*CpuData
;
168 EFI_PROCESSOR_INFORMATION
*ProcessorInfo
;
170 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
172 if (*mSmmMpSyncData
->Counter
== mNumberOfCpus
) {
176 CpuData
= mSmmMpSyncData
->CpuData
;
177 ProcessorInfo
= gSmmCpuPrivate
->ProcessorInfo
;
178 for (Index
= mMaxNumberOfCpus
; Index
-- > 0;) {
179 if (!(*(CpuData
[Index
].Present
)) && ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
180 if (((Exceptions
& ARRIVAL_EXCEPTION_DELAYED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmDelayed
) != 0) {
183 if (((Exceptions
& ARRIVAL_EXCEPTION_BLOCKED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmBlocked
) != 0) {
186 if (((Exceptions
& ARRIVAL_EXCEPTION_SMI_DISABLED
) != 0) && SmmCpuFeaturesGetSmmRegister (Index
, SmmRegSmmEnable
) != 0) {
199 Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
200 entering SMM, except SMI disabled APs.
204 SmmWaitForApArrival (
211 ASSERT (*mSmmMpSyncData
->Counter
<= mNumberOfCpus
);
214 // Platform implementor should choose a timeout value appropriately:
215 // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
216 // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
217 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
218 // and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
219 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
220 // SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
221 // - The timeout value must be longer than longest possible IO operation in the system
225 // Sync with APs 1st timeout
227 for (Timer
= StartSyncTimer ();
228 !IsSyncTimerTimeout (Timer
) &&
229 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
235 // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
237 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
238 // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
239 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
240 // work while SMI handling is on-going.
241 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
242 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
243 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
244 // mode work while SMI handling is on-going.
245 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
246 // - In traditional flow, SMI disabling is discouraged.
247 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
248 // In both cases, adding SMI-disabling checking code increases overhead.
250 if (*mSmmMpSyncData
->Counter
< mNumberOfCpus
) {
252 // Send SMI IPIs to bring outside processors in
254 for (Index
= mMaxNumberOfCpus
; Index
-- > 0;) {
255 if (!(*(mSmmMpSyncData
->CpuData
[Index
].Present
)) && gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
!= INVALID_APIC_ID
) {
256 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
);
261 // Sync with APs 2nd timeout.
263 for (Timer
= StartSyncTimer ();
264 !IsSyncTimerTimeout (Timer
) &&
265 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED
| ARRIVAL_EXCEPTION_SMI_DISABLED
);
276 Replace OS MTRR's with SMI MTRR's.
278 @param CpuIndex Processor Index
286 PROCESSOR_SMM_DESCRIPTOR
*Psd
;
288 MTRR_SETTINGS
*BiosMtrr
;
290 Psd
= (PROCESSOR_SMM_DESCRIPTOR
*)(mCpuHotPlugData
.SmBase
[CpuIndex
] + SMM_PSD_OFFSET
);
291 SmiMtrrs
= (UINT64
*)(UINTN
)Psd
->MtrrBaseMaskPtr
;
293 SmmCpuFeaturesDisableSmrr ();
296 // Replace all MTRRs registers
298 BiosMtrr
= (MTRR_SETTINGS
*)SmiMtrrs
;
299 MtrrSetAllMtrrs(BiosMtrr
);
305 @param CpuIndex BSP processor Index
306 @param SyncMode SMM MP sync mode
312 IN SMM_CPU_SYNC_MODE SyncMode
318 BOOLEAN ClearTopLevelSmiResult
;
321 ASSERT (CpuIndex
== mSmmMpSyncData
->BspIndex
);
325 // Flag BSP's presence
327 *mSmmMpSyncData
->InsideSmm
= TRUE
;
330 // Initialize Debug Agent to start source level debug in BSP handler
332 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI
, NULL
, NULL
);
335 // Mark this processor's presence
337 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
340 // Clear platform top level SMI status bit before calling SMI handlers. If
341 // we cleared it after SMI handlers are run, we would miss the SMI that
342 // occurs after SMI handlers are done and before SMI status bit is cleared.
344 ClearTopLevelSmiResult
= ClearTopLevelSmiStatus();
345 ASSERT (ClearTopLevelSmiResult
== TRUE
);
348 // Set running processor index
350 gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
= CpuIndex
;
353 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
355 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
358 // Wait for APs to arrive
360 SmmWaitForApArrival();
363 // Lock the counter down and retrieve the number of APs
365 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
366 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
369 // Wait for all APs to get ready for programming MTRRs
371 WaitForAllAPs (ApCount
);
373 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
375 // Signal all APs it's time for backup MTRRs
380 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
381 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
382 // to a large enough value to avoid this situation.
383 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
384 // We do the backup first and then set MTRR to avoid race condition for threads
387 MtrrGetAllMtrrs(&Mtrrs
);
390 // Wait for all APs to complete their MTRR saving
392 WaitForAllAPs (ApCount
);
395 // Let all processors program SMM MTRRs together
400 // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
401 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
402 // to a large enough value to avoid this situation.
404 ReplaceOSMtrrs (CpuIndex
);
407 // Wait for all APs to complete their MTRR programming
409 WaitForAllAPs (ApCount
);
414 // The BUSY lock is initialized to Acquired state
416 AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
419 // Perform the pre tasks
424 // Invoke SMM Foundation EntryPoint with the processor information context.
426 gSmmCpuPrivate
->SmmCoreEntry (&gSmmCpuPrivate
->SmmCoreEntryContext
);
429 // Make sure all APs have completed their pending none-block tasks
431 for (Index
= mMaxNumberOfCpus
; Index
-- > 0;) {
432 if (Index
!= CpuIndex
&& *(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
433 AcquireSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
434 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[Index
].Busy
);
439 // Perform the remaining tasks
441 PerformRemainingTasks ();
444 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
445 // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
446 // will run through freely.
448 if (SyncMode
!= SmmCpuSyncModeTradition
&& !SmmCpuFeaturesNeedConfigureMtrrs()) {
451 // Lock the counter down and retrieve the number of APs
453 *mSmmMpSyncData
->AllCpusInSync
= TRUE
;
454 ApCount
= LockdownSemaphore (mSmmMpSyncData
->Counter
) - 1;
456 // Make sure all APs have their Present flag set
460 for (Index
= mMaxNumberOfCpus
; Index
-- > 0;) {
461 if (*(mSmmMpSyncData
->CpuData
[Index
].Present
)) {
465 if (PresentCount
> ApCount
) {
472 // Notify all APs to exit
474 *mSmmMpSyncData
->InsideSmm
= FALSE
;
478 // Wait for all APs to complete their pending tasks
480 WaitForAllAPs (ApCount
);
482 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
484 // Signal APs to restore MTRRs
491 SmmCpuFeaturesReenableSmrr ();
492 MtrrSetAllMtrrs(&Mtrrs
);
495 // Wait for all APs to complete MTRR programming
497 WaitForAllAPs (ApCount
);
501 // Stop source level debug in BSP handler, the code below will not be
504 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI
, NULL
, NULL
);
507 // Signal APs to Reset states/semaphore for this processor
512 // Perform pending operations for hot-plug
517 // Clear the Present flag of BSP
519 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
522 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
523 // WaitForAllAps does not depend on the Present flag.
525 WaitForAllAPs (ApCount
);
528 // Reset BspIndex to -1, meaning BSP has not been elected.
530 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
531 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
535 // Allow APs to check in from this point on
537 *mSmmMpSyncData
->Counter
= 0;
538 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
544 @param CpuIndex AP processor Index.
545 @param ValidSmi Indicates that current SMI is a valid SMI or not.
546 @param SyncMode SMM MP sync mode.
553 IN SMM_CPU_SYNC_MODE SyncMode
563 for (Timer
= StartSyncTimer ();
564 !IsSyncTimerTimeout (Timer
) &&
565 !(*mSmmMpSyncData
->InsideSmm
);
570 if (!(*mSmmMpSyncData
->InsideSmm
)) {
572 // BSP timeout in the first round
574 if (mSmmMpSyncData
->BspIndex
!= -1) {
576 // BSP Index is known
578 BspIndex
= mSmmMpSyncData
->BspIndex
;
579 ASSERT (CpuIndex
!= BspIndex
);
582 // Send SMI IPI to bring BSP in
584 SendSmiIpi ((UINT32
)gSmmCpuPrivate
->ProcessorInfo
[BspIndex
].ProcessorId
);
587 // Now clock BSP for the 2nd time
589 for (Timer
= StartSyncTimer ();
590 !IsSyncTimerTimeout (Timer
) &&
591 !(*mSmmMpSyncData
->InsideSmm
);
596 if (!(*mSmmMpSyncData
->InsideSmm
)) {
598 // Give up since BSP is unable to enter SMM
599 // and signal the completion of this AP
600 WaitForSemaphore (mSmmMpSyncData
->Counter
);
605 // Don't know BSP index. Give up without sending IPI to BSP.
607 WaitForSemaphore (mSmmMpSyncData
->Counter
);
615 BspIndex
= mSmmMpSyncData
->BspIndex
;
616 ASSERT (CpuIndex
!= BspIndex
);
619 // Mark this processor's presence
621 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = TRUE
;
623 if (SyncMode
== SmmCpuSyncModeTradition
|| SmmCpuFeaturesNeedConfigureMtrrs()) {
625 // Notify BSP of arrival at this point
627 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
630 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
632 // Wait for the signal from BSP to backup MTRRs
634 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
639 MtrrGetAllMtrrs(&Mtrrs
);
642 // Signal BSP the completion of this AP
644 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
647 // Wait for BSP's signal to program MTRRs
649 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
652 // Replace OS MTRRs with SMI MTRRs
654 ReplaceOSMtrrs (CpuIndex
);
657 // Signal BSP the completion of this AP
659 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
664 // Wait for something to happen
666 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
669 // Check if BSP wants to exit SMM
671 if (!(*mSmmMpSyncData
->InsideSmm
)) {
676 // BUSY should be acquired by SmmStartupThisAp()
679 !AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)
683 // Invoke the scheduled procedure
685 (*mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
) (
686 (VOID
*)mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
692 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
695 if (SmmCpuFeaturesNeedConfigureMtrrs()) {
697 // Notify BSP the readiness of this AP to program MTRRs
699 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
702 // Wait for the signal from BSP to program MTRRs
704 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
709 SmmCpuFeaturesReenableSmrr ();
710 MtrrSetAllMtrrs(&Mtrrs
);
714 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
716 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
719 // Wait for the signal from BSP to Reset states/semaphore for this processor
721 WaitForSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
724 // Reset states/semaphore for this processor
726 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
729 // Notify BSP the readiness of this AP to exit SMM
731 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[BspIndex
].Run
);
736 Create 4G PageTable in SMRAM.
738 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
739 @return PageTable Address
744 IN BOOLEAN Is32BitPageTable
752 UINTN High2MBoundary
;
762 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
764 // Add one more page for known good stack, then find the lower 2MB aligned address.
766 Low2MBoundary
= (mSmmStackArrayBase
+ EFI_PAGE_SIZE
) & ~(SIZE_2MB
-1);
768 // Add two more pages for known good stack and stack guard page,
769 // then find the lower 2MB aligned address.
771 High2MBoundary
= (mSmmStackArrayEnd
- mSmmStackSize
+ EFI_PAGE_SIZE
* 2) & ~(SIZE_2MB
-1);
772 PagesNeeded
= ((High2MBoundary
- Low2MBoundary
) / SIZE_2MB
) + 1;
775 // Allocate the page table
777 PageTable
= AllocatePageTableMemory (5 + PagesNeeded
);
778 ASSERT (PageTable
!= NULL
);
780 PageTable
= (VOID
*)((UINTN
)PageTable
);
781 Pte
= (UINT64
*)PageTable
;
784 // Zero out all page table entries first
786 ZeroMem (Pte
, EFI_PAGES_TO_SIZE (1));
789 // Set Page Directory Pointers
791 for (Index
= 0; Index
< 4; Index
++) {
792 Pte
[Index
] = (UINTN
)PageTable
+ EFI_PAGE_SIZE
* (Index
+ 1) + (Is32BitPageTable
? IA32_PAE_PDPTE_ATTRIBUTE_BITS
: PAGE_ATTRIBUTE_BITS
);
794 Pte
+= EFI_PAGE_SIZE
/ sizeof (*Pte
);
797 // Fill in Page Directory Entries
799 for (Index
= 0; Index
< EFI_PAGE_SIZE
* 4 / sizeof (*Pte
); Index
++) {
800 Pte
[Index
] = (Index
<< 21) | IA32_PG_PS
| PAGE_ATTRIBUTE_BITS
;
803 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
804 Pages
= (UINTN
)PageTable
+ EFI_PAGES_TO_SIZE (5);
805 GuardPage
= mSmmStackArrayBase
+ EFI_PAGE_SIZE
;
806 Pdpte
= (UINT64
*)PageTable
;
807 for (PageIndex
= Low2MBoundary
; PageIndex
<= High2MBoundary
; PageIndex
+= SIZE_2MB
) {
808 Pte
= (UINT64
*)(UINTN
)(Pdpte
[BitFieldRead32 ((UINT32
)PageIndex
, 30, 31)] & ~(EFI_PAGE_SIZE
- 1));
809 Pte
[BitFieldRead32 ((UINT32
)PageIndex
, 21, 29)] = (UINT64
)Pages
| PAGE_ATTRIBUTE_BITS
;
811 // Fill in Page Table Entries
813 Pte
= (UINT64
*)Pages
;
814 PageAddress
= PageIndex
;
815 for (Index
= 0; Index
< EFI_PAGE_SIZE
/ sizeof (*Pte
); Index
++) {
816 if (PageAddress
== GuardPage
) {
818 // Mark the guard page as non-present
820 Pte
[Index
] = PageAddress
;
821 GuardPage
+= mSmmStackSize
;
822 if (GuardPage
> mSmmStackArrayEnd
) {
826 Pte
[Index
] = PageAddress
| PAGE_ATTRIBUTE_BITS
;
828 PageAddress
+= EFI_PAGE_SIZE
;
830 Pages
+= EFI_PAGE_SIZE
;
834 return (UINT32
)(UINTN
)PageTable
;
838 Set memory cache ability.
840 @param PageTable PageTable Address
841 @param Address Memory Address to change cache ability
842 @param Cacheability Cache ability to set
847 IN UINT64
*PageTable
,
849 IN UINT8 Cacheability
853 VOID
*NewPageTableAddress
;
854 UINT64
*NewPageTable
;
857 ASSERT ((Address
& EFI_PAGE_MASK
) == 0);
859 if (sizeof (UINTN
) == sizeof (UINT64
)) {
860 PTIndex
= (UINTN
)RShiftU64 (Address
, 39) & 0x1ff;
861 ASSERT (PageTable
[PTIndex
] & IA32_PG_P
);
862 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
865 PTIndex
= (UINTN
)RShiftU64 (Address
, 30) & 0x1ff;
866 ASSERT (PageTable
[PTIndex
] & IA32_PG_P
);
867 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
870 // A perfect implementation should check the original cacheability with the
871 // one being set, and break a 2M page entry into pieces only when they
874 PTIndex
= (UINTN
)RShiftU64 (Address
, 21) & 0x1ff;
875 if ((PageTable
[PTIndex
] & IA32_PG_PS
) != 0) {
877 // Allocate a page from SMRAM
879 NewPageTableAddress
= AllocatePageTableMemory (1);
880 ASSERT (NewPageTableAddress
!= NULL
);
882 NewPageTable
= (UINT64
*)NewPageTableAddress
;
884 for (Index
= 0; Index
< 0x200; Index
++) {
885 NewPageTable
[Index
] = PageTable
[PTIndex
];
886 if ((NewPageTable
[Index
] & IA32_PG_PAT_2M
) != 0) {
887 NewPageTable
[Index
] &= ~((UINT64
)IA32_PG_PAT_2M
);
888 NewPageTable
[Index
] |= (UINT64
)IA32_PG_PAT_4K
;
890 NewPageTable
[Index
] |= (UINT64
)(Index
<< EFI_PAGE_SHIFT
);
893 PageTable
[PTIndex
] = ((UINTN
)NewPageTableAddress
& gPhyMask
) | PAGE_ATTRIBUTE_BITS
;
896 ASSERT (PageTable
[PTIndex
] & IA32_PG_P
);
897 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & gPhyMask
);
899 PTIndex
= (UINTN
)RShiftU64 (Address
, 12) & 0x1ff;
900 ASSERT (PageTable
[PTIndex
] & IA32_PG_P
);
901 PageTable
[PTIndex
] &= ~((UINT64
)((IA32_PG_PAT_4K
| IA32_PG_CD
| IA32_PG_WT
)));
902 PageTable
[PTIndex
] |= (UINT64
)Cacheability
;
906 Schedule a procedure to run on the specified CPU.
908 @param[in] Procedure The address of the procedure to run
909 @param[in] CpuIndex Target CPU Index
910 @param[in, OUT] ProcArguments The parameter to pass to the procedure
911 @param[in] BlockingMode Startup AP in blocking mode or not
913 @retval EFI_INVALID_PARAMETER CpuNumber not valid
914 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
915 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
916 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
917 @retval EFI_SUCCESS The procedure has been successfully scheduled
921 InternalSmmStartupThisAp (
922 IN EFI_AP_PROCEDURE Procedure
,
924 IN OUT VOID
*ProcArguments OPTIONAL
,
925 IN BOOLEAN BlockingMode
928 if (CpuIndex
>= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
) {
929 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex
, gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
));
930 return EFI_INVALID_PARAMETER
;
932 if (CpuIndex
== gSmmCpuPrivate
->SmmCoreEntryContext
.CurrentlyExecutingCpu
) {
933 DEBUG((DEBUG_ERROR
, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex
));
934 return EFI_INVALID_PARAMETER
;
936 if (!(*(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
))) {
937 if (mSmmMpSyncData
->EffectiveSyncMode
== SmmCpuSyncModeTradition
) {
938 DEBUG((DEBUG_ERROR
, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex
));
940 return EFI_INVALID_PARAMETER
;
942 if (gSmmCpuPrivate
->Operation
[CpuIndex
] == SmmCpuRemove
) {
943 if (!FeaturePcdGet (PcdCpuHotPlugSupport
)) {
944 DEBUG((DEBUG_ERROR
, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex
));
946 return EFI_INVALID_PARAMETER
;
950 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
952 if (!AcquireSpinLockOrFail (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
)) {
953 DEBUG((DEBUG_ERROR
, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex
));
954 return EFI_INVALID_PARAMETER
;
958 mSmmMpSyncData
->CpuData
[CpuIndex
].Procedure
= Procedure
;
959 mSmmMpSyncData
->CpuData
[CpuIndex
].Parameter
= ProcArguments
;
960 ReleaseSemaphore (mSmmMpSyncData
->CpuData
[CpuIndex
].Run
);
963 AcquireSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
964 ReleaseSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
970 Schedule a procedure to run on the specified CPU in blocking mode.
972 @param[in] Procedure The address of the procedure to run
973 @param[in] CpuIndex Target CPU Index
974 @param[in, out] ProcArguments The parameter to pass to the procedure
976 @retval EFI_INVALID_PARAMETER CpuNumber not valid
977 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
978 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
979 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
980 @retval EFI_SUCCESS The procedure has been successfully scheduled
985 SmmBlockingStartupThisAp (
986 IN EFI_AP_PROCEDURE Procedure
,
988 IN OUT VOID
*ProcArguments OPTIONAL
991 return InternalSmmStartupThisAp(Procedure
, CpuIndex
, ProcArguments
, TRUE
);
995 Schedule a procedure to run on the specified CPU.
997 @param Procedure The address of the procedure to run
998 @param CpuIndex Target CPU Index
999 @param ProcArguments The parameter to pass to the procedure
1001 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1002 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1003 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1004 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1005 @retval EFI_SUCCESS The procedure has been successfully scheduled
1011 IN EFI_AP_PROCEDURE Procedure
,
1013 IN OUT VOID
*ProcArguments OPTIONAL
1016 return InternalSmmStartupThisAp(Procedure
, CpuIndex
, ProcArguments
, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp
));
1020 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1021 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1023 NOTE: It might not be appreciated in runtime since it might
1024 conflict with OS debugging facilities. Turn them off in RELEASE.
1026 @param CpuIndex CPU Index
1035 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1037 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1038 ASSERT(CpuIndex
< mMaxNumberOfCpus
);
1039 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1040 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1041 AsmWriteDr6 (CpuSaveState
->x86
._DR6
);
1042 AsmWriteDr7 (CpuSaveState
->x86
._DR7
);
1044 AsmWriteDr6 ((UINTN
)CpuSaveState
->x64
._DR6
);
1045 AsmWriteDr7 ((UINTN
)CpuSaveState
->x64
._DR7
);
1051 This function restores DR6 & DR7 to SMM save state.
1053 NOTE: It might not be appreciated in runtime since it might
1054 conflict with OS debugging facilities. Turn them off in RELEASE.
1056 @param CpuIndex CPU Index
1065 SMRAM_SAVE_STATE_MAP
*CpuSaveState
;
1067 if (FeaturePcdGet (PcdCpuSmmDebug
)) {
1068 ASSERT(CpuIndex
< mMaxNumberOfCpus
);
1069 CpuSaveState
= (SMRAM_SAVE_STATE_MAP
*)gSmmCpuPrivate
->CpuSaveState
[CpuIndex
];
1070 if (mSmmSaveStateRegisterLma
== EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT
) {
1071 CpuSaveState
->x86
._DR7
= (UINT32
)AsmReadDr7 ();
1072 CpuSaveState
->x86
._DR6
= (UINT32
)AsmReadDr6 ();
1074 CpuSaveState
->x64
._DR7
= AsmReadDr7 ();
1075 CpuSaveState
->x64
._DR6
= AsmReadDr6 ();
1081 C function for SMI entry, each processor comes here upon SMI trigger.
1083 @param CpuIndex CPU Index
1095 BOOLEAN BspInProgress
;
1099 ASSERT(CpuIndex
< mMaxNumberOfCpus
);
1102 // Save Cr2 because Page Fault exception in SMM may override its value
1104 Cr2
= AsmReadCr2 ();
1107 // Perform CPU specific entry hooks
1109 SmmCpuFeaturesRendezvousEntry (CpuIndex
);
1112 // Determine if this is a valid SMI
1114 ValidSmi
= PlatformValidSmi();
1117 // Determine if BSP has been already in progress. Note this must be checked after
1118 // ValidSmi because BSP may clear a valid SMI source after checking in.
1120 BspInProgress
= *mSmmMpSyncData
->InsideSmm
;
1122 if (!BspInProgress
&& !ValidSmi
) {
1124 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
1125 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
1126 // status had been cleared by BSP and an existing SMI run has almost ended. (Note
1127 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
1128 // is nothing we need to do.
1133 // Signal presence of this processor
1135 if (ReleaseSemaphore (mSmmMpSyncData
->Counter
) == 0) {
1137 // BSP has already ended the synchronization, so QUIT!!!
1141 // Wait for BSP's signal to finish SMI
1143 while (*mSmmMpSyncData
->AllCpusInSync
) {
1150 // The BUSY lock is initialized to Released state.
1151 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
1152 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
1153 // after AP's present flag is detected.
1155 InitializeSpinLock (mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
);
1158 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1159 ActivateSmmProfile (CpuIndex
);
1162 if (BspInProgress
) {
1164 // BSP has been elected. Follow AP path, regardless of ValidSmi flag
1165 // as BSP may have cleared the SMI status
1167 APHandler (CpuIndex
, ValidSmi
, mSmmMpSyncData
->EffectiveSyncMode
);
1170 // We have a valid SMI
1177 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
1178 if (!mSmmMpSyncData
->SwitchBsp
|| mSmmMpSyncData
->CandidateBsp
[CpuIndex
]) {
1180 // Call platform hook to do BSP election
1182 Status
= PlatformSmmBspElection (&IsBsp
);
1183 if (EFI_SUCCESS
== Status
) {
1185 // Platform hook determines successfully
1188 mSmmMpSyncData
->BspIndex
= (UINT32
)CpuIndex
;
1192 // Platform hook fails to determine, use default BSP election method
1194 InterlockedCompareExchange32 (
1195 (UINT32
*)&mSmmMpSyncData
->BspIndex
,
1204 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
1206 if (mSmmMpSyncData
->BspIndex
== CpuIndex
) {
1209 // Clear last request for SwitchBsp.
1211 if (mSmmMpSyncData
->SwitchBsp
) {
1212 mSmmMpSyncData
->SwitchBsp
= FALSE
;
1213 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1214 mSmmMpSyncData
->CandidateBsp
[Index
] = FALSE
;
1218 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1219 SmmProfileRecordSmiNum ();
1223 // BSP Handler is always called with a ValidSmi == TRUE
1225 BSPHandler (CpuIndex
, mSmmMpSyncData
->EffectiveSyncMode
);
1227 APHandler (CpuIndex
, ValidSmi
, mSmmMpSyncData
->EffectiveSyncMode
);
1231 ASSERT (*mSmmMpSyncData
->CpuData
[CpuIndex
].Run
== 0);
1234 // Wait for BSP's signal to exit SMI
1236 while (*mSmmMpSyncData
->AllCpusInSync
) {
1242 SmmCpuFeaturesRendezvousExit (CpuIndex
);
1250 Allocate buffer for all semaphores and spin locks.
1254 InitializeSmmCpuSemaphores (
1258 UINTN ProcessorCount
;
1260 UINTN GlobalSemaphoresSize
;
1261 UINTN CpuSemaphoresSize
;
1262 UINTN MsrSemahporeSize
;
1263 UINTN SemaphoreSize
;
1265 UINTN
*SemaphoreBlock
;
1266 UINTN SemaphoreAddr
;
1268 SemaphoreSize
= GetSpinLockProperties ();
1269 ProcessorCount
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
1270 GlobalSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_GLOBAL
) / sizeof (VOID
*)) * SemaphoreSize
;
1271 CpuSemaphoresSize
= (sizeof (SMM_CPU_SEMAPHORE_CPU
) / sizeof (VOID
*)) * ProcessorCount
* SemaphoreSize
;
1272 MsrSemahporeSize
= MSR_SPIN_LOCK_INIT_NUM
* SemaphoreSize
;
1273 TotalSize
= GlobalSemaphoresSize
+ CpuSemaphoresSize
+ MsrSemahporeSize
;
1274 DEBUG((EFI_D_INFO
, "One Semaphore Size = 0x%x\n", SemaphoreSize
));
1275 DEBUG((EFI_D_INFO
, "Total Semaphores Size = 0x%x\n", TotalSize
));
1276 Pages
= EFI_SIZE_TO_PAGES (TotalSize
);
1277 SemaphoreBlock
= AllocatePages (Pages
);
1278 ASSERT (SemaphoreBlock
!= NULL
);
1279 ZeroMem (SemaphoreBlock
, TotalSize
);
1281 SemaphoreAddr
= (UINTN
)SemaphoreBlock
;
1282 mSmmCpuSemaphores
.SemaphoreGlobal
.Counter
= (UINT32
*)SemaphoreAddr
;
1283 SemaphoreAddr
+= SemaphoreSize
;
1284 mSmmCpuSemaphores
.SemaphoreGlobal
.InsideSmm
= (BOOLEAN
*)SemaphoreAddr
;
1285 SemaphoreAddr
+= SemaphoreSize
;
1286 mSmmCpuSemaphores
.SemaphoreGlobal
.AllCpusInSync
= (BOOLEAN
*)SemaphoreAddr
;
1287 SemaphoreAddr
+= SemaphoreSize
;
1288 mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
= (SPIN_LOCK
*)SemaphoreAddr
;
1289 SemaphoreAddr
+= SemaphoreSize
;
1290 mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
1291 = (SPIN_LOCK
*)SemaphoreAddr
;
1292 SemaphoreAddr
+= SemaphoreSize
;
1293 mSmmCpuSemaphores
.SemaphoreGlobal
.MemoryMappedLock
1294 = (SPIN_LOCK
*)SemaphoreAddr
;
1296 SemaphoreAddr
= (UINTN
)SemaphoreBlock
+ GlobalSemaphoresSize
;
1297 mSmmCpuSemaphores
.SemaphoreCpu
.Busy
= (SPIN_LOCK
*)SemaphoreAddr
;
1298 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1299 mSmmCpuSemaphores
.SemaphoreCpu
.Run
= (UINT32
*)SemaphoreAddr
;
1300 SemaphoreAddr
+= ProcessorCount
* SemaphoreSize
;
1301 mSmmCpuSemaphores
.SemaphoreCpu
.Present
= (BOOLEAN
*)SemaphoreAddr
;
1303 SemaphoreAddr
= (UINTN
)SemaphoreBlock
+ GlobalSemaphoresSize
+ CpuSemaphoresSize
;
1304 mSmmCpuSemaphores
.SemaphoreMsr
.Msr
= (SPIN_LOCK
*)SemaphoreAddr
;
1305 mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
=
1306 ((UINTN
)SemaphoreBlock
+ Pages
* SIZE_4KB
- SemaphoreAddr
) / SemaphoreSize
;
1307 ASSERT (mSmmCpuSemaphores
.SemaphoreMsr
.AvailableCounter
>= MSR_SPIN_LOCK_INIT_NUM
);
1309 mPFLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.PFLock
;
1310 mConfigSmmCodeAccessCheckLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.CodeAccessCheckLock
;
1311 mMemoryMappedLock
= mSmmCpuSemaphores
.SemaphoreGlobal
.MemoryMappedLock
;
1313 mSemaphoreSize
= SemaphoreSize
;
1317 Initialize un-cacheable data.
1322 InitializeMpSyncData (
1328 if (mSmmMpSyncData
!= NULL
) {
1330 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
1331 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
1333 ZeroMem (mSmmMpSyncData
, mSmmMpSyncDataSize
);
1334 mSmmMpSyncData
->CpuData
= (SMM_CPU_DATA_BLOCK
*)((UINT8
*)mSmmMpSyncData
+ sizeof (SMM_DISPATCHER_MP_SYNC_DATA
));
1335 mSmmMpSyncData
->CandidateBsp
= (BOOLEAN
*)(mSmmMpSyncData
->CpuData
+ gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
);
1336 if (FeaturePcdGet (PcdCpuSmmEnableBspElection
)) {
1338 // Enable BSP election by setting BspIndex to -1
1340 mSmmMpSyncData
->BspIndex
= (UINT32
)-1;
1342 mSmmMpSyncData
->EffectiveSyncMode
= mCpuSmmSyncMode
;
1344 mSmmMpSyncData
->Counter
= mSmmCpuSemaphores
.SemaphoreGlobal
.Counter
;
1345 mSmmMpSyncData
->InsideSmm
= mSmmCpuSemaphores
.SemaphoreGlobal
.InsideSmm
;
1346 mSmmMpSyncData
->AllCpusInSync
= mSmmCpuSemaphores
.SemaphoreGlobal
.AllCpusInSync
;
1347 ASSERT (mSmmMpSyncData
->Counter
!= NULL
&& mSmmMpSyncData
->InsideSmm
!= NULL
&&
1348 mSmmMpSyncData
->AllCpusInSync
!= NULL
);
1349 *mSmmMpSyncData
->Counter
= 0;
1350 *mSmmMpSyncData
->InsideSmm
= FALSE
;
1351 *mSmmMpSyncData
->AllCpusInSync
= FALSE
;
1353 for (CpuIndex
= 0; CpuIndex
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; CpuIndex
++) {
1354 mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
=
1355 (SPIN_LOCK
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Busy
+ mSemaphoreSize
* CpuIndex
);
1356 mSmmMpSyncData
->CpuData
[CpuIndex
].Run
=
1357 (UINT32
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Run
+ mSemaphoreSize
* CpuIndex
);
1358 mSmmMpSyncData
->CpuData
[CpuIndex
].Present
=
1359 (BOOLEAN
*)((UINTN
)mSmmCpuSemaphores
.SemaphoreCpu
.Present
+ mSemaphoreSize
* CpuIndex
);
1360 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Busy
) = 0;
1361 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Run
) = 0;
1362 *(mSmmMpSyncData
->CpuData
[CpuIndex
].Present
) = FALSE
;
1368 Initialize global data for MP synchronization.
1370 @param Stacks Base address of SMI stack buffer for all processors.
1371 @param StackSize Stack size for each processor in SMM.
1375 InitializeMpServiceData (
1382 MTRR_SETTINGS
*Mtrr
;
1383 PROCESSOR_SMM_DESCRIPTOR
*Psd
;
1384 UINT8
*GdtTssTables
;
1385 UINTN GdtTableStepSize
;
1388 // Allocate memory for all locks and semaphores
1390 InitializeSmmCpuSemaphores ();
1393 // Initialize mSmmMpSyncData
1395 mSmmMpSyncDataSize
= sizeof (SMM_DISPATCHER_MP_SYNC_DATA
) +
1396 (sizeof (SMM_CPU_DATA_BLOCK
) + sizeof (BOOLEAN
)) * gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
1397 mSmmMpSyncData
= (SMM_DISPATCHER_MP_SYNC_DATA
*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize
));
1398 ASSERT (mSmmMpSyncData
!= NULL
);
1399 mCpuSmmSyncMode
= (SMM_CPU_SYNC_MODE
)PcdGet8 (PcdCpuSmmSyncMode
);
1400 InitializeMpSyncData ();
1403 // Initialize physical address mask
1404 // NOTE: Physical memory above virtual address limit is not supported !!!
1406 AsmCpuid (0x80000008, (UINT32
*)&Index
, NULL
, NULL
, NULL
);
1407 gPhyMask
= LShiftU64 (1, (UINT8
)Index
) - 1;
1408 gPhyMask
&= (1ull << 48) - EFI_PAGE_SIZE
;
1411 // Create page tables
1413 Cr3
= SmmInitPageTable ();
1415 GdtTssTables
= InitGdt (Cr3
, &GdtTableStepSize
);
1418 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
1420 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
1421 Psd
= (PROCESSOR_SMM_DESCRIPTOR
*)(VOID
*)(UINTN
)(mCpuHotPlugData
.SmBase
[Index
] + SMM_PSD_OFFSET
);
1422 CopyMem (Psd
, &gcPsd
, sizeof (gcPsd
));
1423 Psd
->SmmGdtPtr
= (UINT64
)(UINTN
)(GdtTssTables
+ GdtTableStepSize
* Index
);
1424 Psd
->SmmGdtSize
= gcSmiGdtr
.Limit
+ 1;
1427 // Install SMI handler
1431 (UINT32
)mCpuHotPlugData
.SmBase
[Index
],
1432 (VOID
*)((UINTN
)Stacks
+ (StackSize
* Index
)),
1434 (UINTN
)Psd
->SmmGdtPtr
,
1437 gcSmiIdtr
.Limit
+ 1,
1443 // Record current MTRR settings
1445 ZeroMem(gSmiMtrrs
, sizeof (gSmiMtrrs
));
1446 Mtrr
= (MTRR_SETTINGS
*)gSmiMtrrs
;
1447 MtrrGetAllMtrrs (Mtrr
);
1454 Register the SMM Foundation entry point.
1456 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
1457 @param SmmEntryPoint SMM Foundation EntryPoint
1459 @retval EFI_SUCCESS Successfully registered the SMM Foundation entry point
1465 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL
*This
,
1466 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
1470 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
1472 gSmmCpuPrivate
->SmmCoreEntry
= SmmEntryPoint
;