/** @file
Enable SMM profile.

Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
20 UINT32 mSmmProfileCr3
;
22 SMM_PROFILE_HEADER
*mSmmProfileBase
;
23 MSR_DS_AREA_STRUCT
*mMsrDsAreaBase
;
25 // The buffer to store SMM profile data.
27 UINTN mSmmProfileSize
;
30 // The buffer to enable branch trace store.
32 UINTN mMsrDsAreaSize
= SMM_PROFILE_DTS_SIZE
;
35 // The flag indicates if execute-disable is enabled on processor.
37 BOOLEAN mXdEnabled
= FALSE
;
40 // The flag indicates if BTS is supported by processor.
42 BOOLEAN mBtsSupported
= TRUE
;
45 // The flag indicates if SMM profile starts to record data.
47 BOOLEAN mSmmProfileStart
= FALSE
;
50 // Record the page fault exception count for one instruction execution.
54 UINT64 (*mLastPFEntryValue
)[MAX_PF_ENTRY_COUNT
];
55 UINT64
*(*mLastPFEntryPointer
)[MAX_PF_ENTRY_COUNT
];
57 MSR_DS_AREA_STRUCT
**mMsrDsArea
;
58 BRANCH_TRACE_RECORD
**mMsrBTSRecord
;
59 UINTN mBTSRecordNumber
;
60 PEBS_RECORD
**mMsrPEBSRecord
;
63 // These memory ranges are always present, they does not generate the access type of page fault exception,
64 // but they possibly generate instruction fetch type of page fault exception.
66 MEMORY_PROTECTION_RANGE
*mProtectionMemRange
= NULL
;
67 UINTN mProtectionMemRangeCount
= 0;
70 // Some predefined memory ranges.
72 MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate
[] = {
74 // SMRAM range (to be fixed in runtime).
75 // It is always present and instruction fetches are allowed.
77 {{0x00000000, 0x00000000},TRUE
,FALSE
},
80 // SMM profile data range( to be fixed in runtime).
81 // It is always present and instruction fetches are not allowed.
83 {{0x00000000, 0x00000000},TRUE
,TRUE
},
86 // Future extended range could be added here.
90 // PCI MMIO ranges (to be added in runtime).
91 // They are always present and instruction fetches are not allowed.
96 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
98 MEMORY_RANGE
*mSplitMemRange
= NULL
;
99 UINTN mSplitMemRangeCount
= 0;
104 UINT32 mSmiCommandPort
;
107 Disable branch trace store.
115 AsmMsrAnd64 (MSR_DEBUG_CTL
, ~((UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
)));
119 Enable branch trace store.
127 AsmMsrOr64 (MSR_DEBUG_CTL
, (MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
));
131 Get CPU Index from APIC ID.
142 ApicId
= GetApicId ();
144 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
145 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== ApicId
) {
154 Get the source of IP after execute-disable exception is triggered.
156 @param CpuIndex The index of CPU.
157 @param DestinationIP The destination address.
161 GetSourceFromDestinationOnBts (
166 BRANCH_TRACE_RECORD
*CurrentBTSRecord
;
172 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)mMsrDsArea
[CpuIndex
]->BTSIndex
;
173 for (Index
= 0; Index
< mBTSRecordNumber
; Index
++) {
174 if ((UINTN
)CurrentBTSRecord
< (UINTN
)mMsrBTSRecord
[CpuIndex
]) {
178 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[CpuIndex
]->BTSAbsoluteMaximum
- 1);
181 if (CurrentBTSRecord
->LastBranchTo
== DestinationIP
) {
183 // Good! find 1st one, then find 2nd one.
187 // The first one is DEBUG exception
192 // Good find proper one.
194 return CurrentBTSRecord
->LastBranchFrom
;
204 SMM profile specific INT 1 (single-step) exception handler.
206 @param InterruptType Defines the type of interrupt or exception that
207 occurred on the processor.This parameter is processor architecture specific.
208 @param SystemContext A pointer to the processor context when
209 the interrupt occurred on the processor.
213 DebugExceptionHandler (
214 IN EFI_EXCEPTION_TYPE InterruptType
,
215 IN EFI_SYSTEM_CONTEXT SystemContext
221 if (!mSmmProfileStart
) {
224 CpuIndex
= GetCpuIndex ();
227 // Clear last PF entries
229 for (PFEntry
= 0; PFEntry
< mPFEntryCount
[CpuIndex
]; PFEntry
++) {
230 *mLastPFEntryPointer
[CpuIndex
][PFEntry
] = mLastPFEntryValue
[CpuIndex
][PFEntry
];
234 // Reset page fault exception count for next page fault.
236 mPFEntryCount
[CpuIndex
] = 0;
244 // Clear TF in EFLAGS
246 ClearTrapFlag (SystemContext
);
250 Check if the memory address will be mapped by 4KB-page.
252 @param Address The address of Memory.
253 @param Nx The flag indicates if the memory is execute-disable.
258 IN EFI_PHYSICAL_ADDRESS Address
,
265 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
267 // Check configuration
269 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
270 if ((Address
>= mProtectionMemRange
[Index
].Range
.Base
) && (Address
< mProtectionMemRange
[Index
].Range
.Top
)) {
271 *Nx
= mProtectionMemRange
[Index
].Nx
;
272 return mProtectionMemRange
[Index
].Present
;
279 if ((Address
< mCpuHotPlugData
.SmrrBase
) ||
280 (Address
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
288 Check if the memory address will be mapped by 4KB-page.
290 @param Address The address of Memory.
295 IN EFI_PHYSICAL_ADDRESS Address
300 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
302 // Check configuration
304 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
305 if ((Address
>= mSplitMemRange
[Index
].Base
) && (Address
< mSplitMemRange
[Index
].Top
)) {
310 if (Address
< mCpuHotPlugData
.SmrrBase
) {
311 if ((mCpuHotPlugData
.SmrrBase
- Address
) < BASE_2MB
) {
314 } else if (Address
> (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) {
315 if ((Address
- (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) < BASE_2MB
) {
327 Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
331 InitProtectedMemRange (
336 UINTN NumberOfDescriptors
;
337 UINTN NumberOfMmioDescriptors
;
338 UINTN NumberOfProtectRange
;
339 UINTN NumberOfSpliteRange
;
340 EFI_GCD_MEMORY_SPACE_DESCRIPTOR
*MemorySpaceMap
;
342 EFI_PHYSICAL_ADDRESS ProtectBaseAddress
;
343 EFI_PHYSICAL_ADDRESS ProtectEndAddress
;
344 EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress
;
345 EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress
;
346 UINT64 High4KBPageSize
;
347 UINT64 Low4KBPageSize
;
349 NumberOfDescriptors
= 0;
350 NumberOfMmioDescriptors
= 0;
351 NumberOfSpliteRange
= 0;
352 MemorySpaceMap
= NULL
;
355 // Get MMIO ranges from GCD and add them into protected memory ranges.
357 gDS
->GetMemorySpaceMap (
358 &NumberOfDescriptors
,
361 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
362 if (MemorySpaceMap
[Index
].GcdMemoryType
== EfiGcdMemoryTypeMemoryMappedIo
) {
363 NumberOfMmioDescriptors
++;
367 if (NumberOfMmioDescriptors
!= 0) {
368 TotalSize
= NumberOfMmioDescriptors
* sizeof (MEMORY_PROTECTION_RANGE
) + sizeof (mProtectionMemRangeTemplate
);
369 mProtectionMemRange
= (MEMORY_PROTECTION_RANGE
*) AllocateZeroPool (TotalSize
);
370 ASSERT (mProtectionMemRange
!= NULL
);
371 mProtectionMemRangeCount
= TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
);
374 // Copy existing ranges.
376 CopyMem (mProtectionMemRange
, mProtectionMemRangeTemplate
, sizeof (mProtectionMemRangeTemplate
));
379 // Create split ranges which come from protected ranges.
381 TotalSize
= (TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
)) * sizeof (MEMORY_RANGE
);
382 mSplitMemRange
= (MEMORY_RANGE
*) AllocateZeroPool (TotalSize
);
383 ASSERT (mSplitMemRange
!= NULL
);
386 // Create MMIO ranges which are set to present and execution-disable.
388 NumberOfProtectRange
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
389 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
390 if (MemorySpaceMap
[Index
].GcdMemoryType
!= EfiGcdMemoryTypeMemoryMappedIo
) {
393 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= MemorySpaceMap
[Index
].BaseAddress
;
394 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= MemorySpaceMap
[Index
].BaseAddress
+ MemorySpaceMap
[Index
].Length
;
395 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
396 mProtectionMemRange
[NumberOfProtectRange
].Nx
= TRUE
;
397 NumberOfProtectRange
++;
402 // According to protected ranges, create the ranges which will be mapped by 2KB page.
404 NumberOfSpliteRange
= 0;
405 NumberOfProtectRange
= mProtectionMemRangeCount
;
406 for (Index
= 0; Index
< NumberOfProtectRange
; Index
++) {
408 // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
410 ProtectBaseAddress
= mProtectionMemRange
[Index
].Range
.Base
;
411 ProtectEndAddress
= mProtectionMemRange
[Index
].Range
.Top
;
412 if (((ProtectBaseAddress
& (SIZE_2MB
- 1)) != 0) || ((ProtectEndAddress
& (SIZE_2MB
- 1)) != 0)) {
414 // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
415 // A mix of 4KB and 2MB page could save SMRAM space.
417 Top2MBAlignedAddress
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
418 Base2MBAlignedAddress
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
419 if ((Top2MBAlignedAddress
> Base2MBAlignedAddress
) &&
420 ((Top2MBAlignedAddress
- Base2MBAlignedAddress
) >= SIZE_2MB
)) {
422 // There is an range which could be mapped by 2MB-page.
424 High4KBPageSize
= ((ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectEndAddress
& ~(SIZE_2MB
- 1));
425 Low4KBPageSize
= ((ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectBaseAddress
& ~(SIZE_2MB
- 1));
426 if (High4KBPageSize
!= 0) {
428 // Add not 2MB-aligned range to be mapped by 4KB-page.
430 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
431 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
432 NumberOfSpliteRange
++;
434 if (Low4KBPageSize
!= 0) {
436 // Add not 2MB-aligned range to be mapped by 4KB-page.
438 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
439 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
440 NumberOfSpliteRange
++;
444 // The range could only be mapped by 4KB-page.
446 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
447 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
448 NumberOfSpliteRange
++;
453 mSplitMemRangeCount
= NumberOfSpliteRange
;
455 DEBUG ((EFI_D_INFO
, "SMM Profile Memory Ranges:\n"));
456 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
457 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Base = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Base
));
458 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Top = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Top
));
460 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
461 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Base = %lx\n", Index
, mSplitMemRange
[Index
].Base
));
462 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Top = %lx\n", Index
, mSplitMemRange
[Index
].Top
));
467 Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
484 UINTN NumberOfPdpEntries
;
485 UINTN NumberOfPml4Entries
;
486 UINTN SizeOfMemorySpace
;
489 if (sizeof (UINTN
) == sizeof (UINT64
)) {
490 Pml4
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
491 SizeOfMemorySpace
= HighBitSet64 (gPhyMask
) + 1;
493 // Calculate the table entries of PML4E and PDPTE.
495 if (SizeOfMemorySpace
<= 39 ) {
496 NumberOfPml4Entries
= 1;
497 NumberOfPdpEntries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 30));
499 NumberOfPml4Entries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 39));
500 NumberOfPdpEntries
= 512;
503 NumberOfPml4Entries
= 1;
504 NumberOfPdpEntries
= 4;
508 // Go through page table and change 2MB-page into 4KB-page.
510 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
511 if (sizeof (UINTN
) == sizeof (UINT64
)) {
512 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
514 // If Pml4 entry does not exist, skip it
518 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
520 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
522 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
523 if ((*Pde
& IA32_PG_P
) == 0) {
525 // If PDE entry does not exist, skip it
529 if ((*Pde
& IA32_PG_PS
) != 0) {
531 // This is 1G entry, skip it
535 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
539 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
540 if ((*Pte
& IA32_PG_P
) == 0) {
542 // If PTE entry does not exist, skip it
546 Address
= (((Level2
<< 9) + Level3
) << 21);
549 // If it is 2M page, check IsAddressSplit()
551 if (((*Pte
& IA32_PG_PS
) != 0) && IsAddressSplit (Address
)) {
553 // Based on current page table, create 4KB page table for split area.
555 ASSERT (Address
== (*Pte
& PHYSICAL_ADDRESS_MASK
));
557 Pt
= AllocatePageTableMemory (1);
561 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++) {
562 Pt
[Level4
] = Address
+ ((Level4
<< 12) | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
564 *Pte
= (UINT64
)(UINTN
)Pt
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
565 } // end if IsAddressSplit
571 // Go through page table and set several page table entries to absent or execute-disable.
573 DEBUG ((EFI_D_INFO
, "Patch page table start ...\n"));
574 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
575 if (sizeof (UINTN
) == sizeof (UINT64
)) {
576 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
578 // If Pml4 entry does not exist, skip it
582 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
584 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
586 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
587 if ((*Pde
& IA32_PG_P
) == 0) {
589 // If PDE entry does not exist, skip it
593 if ((*Pde
& IA32_PG_PS
) != 0) {
595 // This is 1G entry, set NX bit and skip it
598 *Pde
= *Pde
| IA32_PG_NX
;
602 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
606 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
607 if ((*Pte
& IA32_PG_P
) == 0) {
609 // If PTE entry does not exist, skip it
613 Address
= (((Level2
<< 9) + Level3
) << 21);
615 if ((*Pte
& IA32_PG_PS
) != 0) {
618 if (!IsAddressValid (Address
, &Nx
)) {
620 // Patch to remove Present flag and RW flag
622 *Pte
= *Pte
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
624 if (Nx
&& mXdSupported
) {
625 *Pte
= *Pte
| IA32_PG_NX
;
629 Pt
= (UINT64
*)(UINTN
)(*Pte
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
633 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++, Pt
++) {
634 if (!IsAddressValid (Address
, &Nx
)) {
635 *Pt
= *Pt
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
637 if (Nx
&& mXdSupported
) {
638 *Pt
= *Pt
| IA32_PG_NX
;
651 DEBUG ((EFI_D_INFO
, "Patch page table done!\n"));
653 // Set execute-disable flag
661 To find FADT in ACPI tables.
663 @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable.
665 @return FADT table pointer.
667 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
668 FindAcpiFadtTableByAcpiGuid (
669 IN EFI_GUID
*AcpiTableGuid
672 EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER
*Rsdp
;
673 EFI_ACPI_DESCRIPTION_HEADER
*Rsdt
;
674 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
681 // found ACPI table RSD_PTR from system table
683 for (Index
= 0; Index
< gST
->NumberOfTableEntries
; Index
++) {
684 if (CompareGuid (&(gST
->ConfigurationTable
[Index
].VendorGuid
), AcpiTableGuid
)) {
686 // A match was found.
688 Rsdp
= gST
->ConfigurationTable
[Index
].VendorTable
;
697 Rsdt
= (EFI_ACPI_DESCRIPTION_HEADER
*)(UINTN
) Rsdp
->RsdtAddress
;
698 if (Rsdt
== NULL
|| Rsdt
->Signature
!= EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE
) {
702 for (Index
= sizeof (EFI_ACPI_DESCRIPTION_HEADER
); Index
< Rsdt
->Length
; Index
= Index
+ sizeof (UINT32
)) {
704 Data32
= *(UINT32
*) ((UINT8
*) Rsdt
+ Index
);
705 Fadt
= (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*) (UINT32
*) (UINTN
) Data32
;
706 if (Fadt
->Header
.Signature
== EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
711 if (Fadt
== NULL
|| Fadt
->Header
.Signature
!= EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
719 To find FADT in ACPI tables.
721 @return FADT table pointer.
723 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
728 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
730 Fadt
= FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid
);
735 return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid
);
739 To get system port address of the SMI Command Port in FADT table.
747 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
749 Fadt
= FindAcpiFadtTable ();
750 ASSERT (Fadt
!= NULL
);
752 mSmiCommandPort
= Fadt
->SmiCmd
;
753 DEBUG ((EFI_D_INFO
, "mSmiCommandPort = %x\n", mSmiCommandPort
));
757 Updates page table to make some memory ranges (like system memory) absent
758 and make some memory ranges (like MMIO) present and execute disable. It also
759 update 2MB-page to 4KB-page for some memory ranges.
768 // The flag indicates SMM profile starts to work.
770 mSmmProfileStart
= TRUE
;
774 Initialize SMM profile in SmmReadyToLock protocol callback function.
776 @param Protocol Points to the protocol's unique identifier.
777 @param Interface Points to the interface instance.
778 @param Handle The handle on which the interface was installed.
780 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
784 InitSmmProfileCallBack (
785 IN CONST EFI_GUID
*Protocol
,
791 // Save to variable so that SMM profile data can be found.
796 EFI_VARIABLE_BOOTSERVICE_ACCESS
| EFI_VARIABLE_RUNTIME_ACCESS
,
797 sizeof(mSmmProfileBase
),
802 // Get Software SMI from FADT
804 GetSmiCommandPort ();
807 // Initialize protected memory range for patching page table later.
809 InitProtectedMemRange ();
815 Initialize SMM profile data structures.
819 InitSmmProfileInternal (
824 EFI_PHYSICAL_ADDRESS Base
;
827 UINTN MsrDsAreaSizePerCpu
;
830 mPFEntryCount
= (UINTN
*)AllocateZeroPool (sizeof (UINTN
) * mMaxNumberOfCpus
);
831 ASSERT (mPFEntryCount
!= NULL
);
832 mLastPFEntryValue
= (UINT64 (*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
833 sizeof (mLastPFEntryValue
[0]) * mMaxNumberOfCpus
);
834 ASSERT (mLastPFEntryValue
!= NULL
);
835 mLastPFEntryPointer
= (UINT64
*(*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
836 sizeof (mLastPFEntryPointer
[0]) * mMaxNumberOfCpus
);
837 ASSERT (mLastPFEntryPointer
!= NULL
);
840 // Allocate memory for SmmProfile below 4GB.
843 mSmmProfileSize
= PcdGet32 (PcdCpuSmmProfileSize
);
844 ASSERT ((mSmmProfileSize
& 0xFFF) == 0);
847 TotalSize
= mSmmProfileSize
+ mMsrDsAreaSize
;
849 TotalSize
= mSmmProfileSize
;
853 Status
= gBS
->AllocatePages (
855 EfiReservedMemoryType
,
856 EFI_SIZE_TO_PAGES (TotalSize
),
859 ASSERT_EFI_ERROR (Status
);
860 ZeroMem ((VOID
*)(UINTN
)Base
, TotalSize
);
861 mSmmProfileBase
= (SMM_PROFILE_HEADER
*)(UINTN
)Base
;
864 // Initialize SMM profile data header.
866 mSmmProfileBase
->HeaderSize
= sizeof (SMM_PROFILE_HEADER
);
867 mSmmProfileBase
->MaxDataEntries
= (UINT64
)((mSmmProfileSize
- sizeof(SMM_PROFILE_HEADER
)) / sizeof (SMM_PROFILE_ENTRY
));
868 mSmmProfileBase
->MaxDataSize
= MultU64x64 (mSmmProfileBase
->MaxDataEntries
, sizeof(SMM_PROFILE_ENTRY
));
869 mSmmProfileBase
->CurDataEntries
= 0;
870 mSmmProfileBase
->CurDataSize
= 0;
871 mSmmProfileBase
->TsegStart
= mCpuHotPlugData
.SmrrBase
;
872 mSmmProfileBase
->TsegSize
= mCpuHotPlugData
.SmrrSize
;
873 mSmmProfileBase
->NumSmis
= 0;
874 mSmmProfileBase
->NumCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
877 mMsrDsArea
= (MSR_DS_AREA_STRUCT
**)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT
*) * mMaxNumberOfCpus
);
878 ASSERT (mMsrDsArea
!= NULL
);
879 mMsrBTSRecord
= (BRANCH_TRACE_RECORD
**)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD
*) * mMaxNumberOfCpus
);
880 ASSERT (mMsrBTSRecord
!= NULL
);
881 mMsrPEBSRecord
= (PEBS_RECORD
**)AllocateZeroPool (sizeof (PEBS_RECORD
*) * mMaxNumberOfCpus
);
882 ASSERT (mMsrPEBSRecord
!= NULL
);
884 mMsrDsAreaBase
= (MSR_DS_AREA_STRUCT
*)((UINTN
)Base
+ mSmmProfileSize
);
885 MsrDsAreaSizePerCpu
= mMsrDsAreaSize
/ mMaxNumberOfCpus
;
886 mBTSRecordNumber
= (MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
- sizeof(MSR_DS_AREA_STRUCT
)) / sizeof(BRANCH_TRACE_RECORD
);
887 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
888 mMsrDsArea
[Index
] = (MSR_DS_AREA_STRUCT
*)((UINTN
)mMsrDsAreaBase
+ MsrDsAreaSizePerCpu
* Index
);
889 mMsrBTSRecord
[Index
] = (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + sizeof(MSR_DS_AREA_STRUCT
));
890 mMsrPEBSRecord
[Index
] = (PEBS_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
);
892 mMsrDsArea
[Index
]->BTSBufferBase
= (UINTN
)mMsrBTSRecord
[Index
];
893 mMsrDsArea
[Index
]->BTSIndex
= mMsrDsArea
[Index
]->BTSBufferBase
;
894 mMsrDsArea
[Index
]->BTSAbsoluteMaximum
= mMsrDsArea
[Index
]->BTSBufferBase
+ mBTSRecordNumber
* sizeof(BRANCH_TRACE_RECORD
) + 1;
895 mMsrDsArea
[Index
]->BTSInterruptThreshold
= mMsrDsArea
[Index
]->BTSAbsoluteMaximum
+ 1;
897 mMsrDsArea
[Index
]->PEBSBufferBase
= (UINTN
)mMsrPEBSRecord
[Index
];
898 mMsrDsArea
[Index
]->PEBSIndex
= mMsrDsArea
[Index
]->PEBSBufferBase
;
899 mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
= mMsrDsArea
[Index
]->PEBSBufferBase
+ PEBS_RECORD_NUMBER
* sizeof(PEBS_RECORD
) + 1;
900 mMsrDsArea
[Index
]->PEBSInterruptThreshold
= mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
+ 1;
904 mProtectionMemRange
= mProtectionMemRangeTemplate
;
905 mProtectionMemRangeCount
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
908 // Update TSeg entry.
910 mProtectionMemRange
[0].Range
.Base
= mCpuHotPlugData
.SmrrBase
;
911 mProtectionMemRange
[0].Range
.Top
= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
;
914 // Update SMM profile entry.
916 mProtectionMemRange
[1].Range
.Base
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
;
917 mProtectionMemRange
[1].Range
.Top
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
+ TotalSize
;
920 // Allocate memory reserved for creating 4KB pages.
922 InitPagesForPFHandler ();
925 // Start SMM profile when SmmReadyToLock protocol is installed.
927 Status
= gSmst
->SmmRegisterProtocolNotify (
928 &gEfiSmmReadyToLockProtocolGuid
,
929 InitSmmProfileCallBack
,
932 ASSERT_EFI_ERROR (Status
);
938 Check if XD feature is supported by a processor.
942 CheckFeatureSupported (
948 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr
;
951 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
952 if (RegEax
<= CPUID_EXTENDED_FUNCTION
) {
954 // Extended CPUID functions are not supported on this processor.
956 mXdSupported
= FALSE
;
959 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
960 if ((RegEdx
& CPUID1_EDX_XD_SUPPORT
) == 0) {
962 // Execute Disable Bit feature is not supported on this processor.
964 mXdSupported
= FALSE
;
969 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
);
970 if ((RegEdx
& CPUID1_EDX_BTS_AVAILABLE
) != 0) {
973 // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
974 // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
975 // availability of the BTS facilities, including the ability to set the BTS and
976 // BTINT bits in the MSR_DEBUGCTLA MSR.
977 // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
979 MiscEnableMsr
.Uint64
= AsmReadMsr64 (MSR_IA32_MISC_ENABLE
);
980 if (MiscEnableMsr
.Bits
.BTS
== 1) {
982 // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
984 mBtsSupported
= FALSE
;
995 ActivateSingleStepDB (
1001 Dr6
= AsmReadDr6 ();
1002 if ((Dr6
& DR6_SINGLE_STEP
) != 0) {
1005 Dr6
|= DR6_SINGLE_STEP
;
1020 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1021 if ((DebugCtl
& MSR_DEBUG_CTL_LBR
) != 0) {
1024 DebugCtl
|= MSR_DEBUG_CTL_LBR
;
1025 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1029 Enable branch trace store.
1031 @param CpuIndex The index of the processor.
1041 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1042 if ((DebugCtl
& MSR_DEBUG_CTL_BTS
) != 0) {
1046 AsmWriteMsr64 (MSR_DS_AREA
, (UINT64
)(UINTN
)mMsrDsArea
[CpuIndex
]);
1047 DebugCtl
|= (UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
);
1048 DebugCtl
&= ~((UINT64
)MSR_DEBUG_CTL_BTINT
);
1049 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1053 Increase SMI number in each SMI entry.
1057 SmmProfileRecordSmiNum (
1061 if (mSmmProfileStart
) {
1062 mSmmProfileBase
->NumSmis
++;
1067 Initialize processor environment for SMM profile.
1069 @param CpuIndex The index of the processor.
1073 ActivateSmmProfile (
1078 // Enable Single Step DB#
1080 ActivateSingleStepDB ();
1082 if (mBtsSupported
) {
1084 // We can not get useful information from LER, so we have to use BTS.
1091 ActivateBTS (CpuIndex
);
1096 Initialize SMM profile in SMM CPU entry point.
1098 @param[in] Cr3 The base address of the page tables to use in SMM.
1109 mSmmProfileCr3
= Cr3
;
1112 // Skip SMM profile initialization if feature is disabled
1114 if (!FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1119 // Initialize SmmProfile here
1121 InitSmmProfileInternal ();
1124 // Initialize profile IDT.
1130 Update page table to map the memory correctly in order to make the instruction
1131 which caused page fault execute successfully. And it also save the original page
1132 table to be restored in single-step exception.
1134 @param PageTable PageTable Address.
1135 @param PFAddress The memory address which caused page fault exception.
1136 @param CpuIndex The index of the processor.
1137 @param ErrorCode The Error code of exception.
1141 RestorePageTableBelow4G (
1154 if (sizeof(UINT64
) == sizeof(UINTN
)) {
1155 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 39, 47);
1156 ASSERT (PageTable
[PTIndex
] != 0);
1157 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1163 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 30, 38);
1164 ASSERT (PageTable
[PTIndex
] != 0);
1165 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1170 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 21, 29);
1171 if ((PageTable
[PTIndex
] & IA32_PG_PS
) != 0) {
1177 // Record old entries with non-present status
1178 // Old entries include the memory which instruction is at and the memory which instruction access.
1181 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1182 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1183 PFIndex
= mPFEntryCount
[CpuIndex
];
1184 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1185 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1186 mPFEntryCount
[CpuIndex
]++;
1192 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 21) - 1));
1193 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_PS
;
1194 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1195 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1196 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1202 ASSERT (PageTable
[PTIndex
] != 0);
1203 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1208 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 12, 20);
1211 // Record old entries with non-present status
1212 // Old entries include the memory which instruction is at and the memory which instruction access.
1215 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1216 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1217 PFIndex
= mPFEntryCount
[CpuIndex
];
1218 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1219 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1220 mPFEntryCount
[CpuIndex
]++;
1226 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 12) - 1));
1227 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1228 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1229 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1235 The Page fault handler to save SMM profile data.
1237 @param Rip The RIP when exception happens.
1238 @param ErrorCode The Error code of exception.
1242 SmmProfilePFHandler (
1251 UINT64 InstructionAddress
;
1252 UINTN MaxEntryNumber
;
1253 UINTN CurrentEntryNumber
;
1254 BOOLEAN IsValidPFAddress
;
1255 SMM_PROFILE_ENTRY
*SmmProfileEntry
;
1259 EFI_SMM_SAVE_STATE_IO_INFO IoInfo
;
1261 if (!mSmmProfileStart
) {
1263 // If SMM profile does not start, call original page fault handler.
1265 SmiDefaultPFHandler ();
1269 if (mBtsSupported
) {
1273 IsValidPFAddress
= FALSE
;
1274 PageTable
= (UINT64
*)AsmReadCr3 ();
1275 PFAddress
= AsmReadCr2 ();
1276 CpuIndex
= GetCpuIndex ();
1278 if (PFAddress
<= 0xFFFFFFFF) {
1279 RestorePageTableBelow4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
);
1281 RestorePageTableAbove4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
, &IsValidPFAddress
);
1284 if (!IsValidPFAddress
) {
1285 InstructionAddress
= Rip
;
1286 if ((ErrorCode
& IA32_PF_EC_ID
) != 0 && (mBtsSupported
)) {
1288 // If it is instruction fetch failure, get the correct IP from BTS.
1290 InstructionAddress
= GetSourceFromDestinationOnBts (CpuIndex
, Rip
);
1291 if (InstructionAddress
== 0) {
1293 // It indicates the instruction which caused page fault is not a jump instruction,
1294 // set instruction address same as the page fault address.
1296 InstructionAddress
= PFAddress
;
1301 // Indicate it is not software SMI
1303 SmiCommand
= 0xFFFFFFFFFFFFFFFFULL
;
1304 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1305 Status
= SmmReadSaveState(&mSmmCpu
, sizeof(IoInfo
), EFI_SMM_SAVE_STATE_REGISTER_IO
, Index
, &IoInfo
);
1306 if (EFI_ERROR (Status
)) {
1309 if (IoInfo
.IoPort
== mSmiCommandPort
) {
1311 // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1313 SoftSmiValue
= IoRead8 (mSmiCommandPort
);
1314 SmiCommand
= (UINT64
)SoftSmiValue
;
1319 SmmProfileEntry
= (SMM_PROFILE_ENTRY
*)(UINTN
)(mSmmProfileBase
+ 1);
1321 // Check if there is already a same entry in profile data.
1323 for (Index
= 0; Index
< (UINTN
) mSmmProfileBase
->CurDataEntries
; Index
++) {
1324 if ((SmmProfileEntry
[Index
].ErrorCode
== (UINT64
)ErrorCode
) &&
1325 (SmmProfileEntry
[Index
].Address
== PFAddress
) &&
1326 (SmmProfileEntry
[Index
].CpuNum
== (UINT64
)CpuIndex
) &&
1327 (SmmProfileEntry
[Index
].Instruction
== InstructionAddress
) &&
1328 (SmmProfileEntry
[Index
].SmiCmd
== SmiCommand
)) {
1330 // Same record exist, need not save again.
1335 if (Index
== mSmmProfileBase
->CurDataEntries
) {
1336 CurrentEntryNumber
= (UINTN
) mSmmProfileBase
->CurDataEntries
;
1337 MaxEntryNumber
= (UINTN
) mSmmProfileBase
->MaxDataEntries
;
1338 if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer
)) {
1339 CurrentEntryNumber
= CurrentEntryNumber
% MaxEntryNumber
;
1341 if (CurrentEntryNumber
< MaxEntryNumber
) {
1343 // Log the new entry
1345 SmmProfileEntry
[CurrentEntryNumber
].SmiNum
= mSmmProfileBase
->NumSmis
;
1346 SmmProfileEntry
[CurrentEntryNumber
].ErrorCode
= (UINT64
)ErrorCode
;
1347 SmmProfileEntry
[CurrentEntryNumber
].ApicId
= (UINT64
)GetApicId ();
1348 SmmProfileEntry
[CurrentEntryNumber
].CpuNum
= (UINT64
)CpuIndex
;
1349 SmmProfileEntry
[CurrentEntryNumber
].Address
= PFAddress
;
1350 SmmProfileEntry
[CurrentEntryNumber
].Instruction
= InstructionAddress
;
1351 SmmProfileEntry
[CurrentEntryNumber
].SmiCmd
= SmiCommand
;
1353 // Update current entry index and data size in the header.
1355 mSmmProfileBase
->CurDataEntries
++;
1356 mSmmProfileBase
->CurDataSize
= MultU64x64 (mSmmProfileBase
->CurDataEntries
, sizeof (SMM_PROFILE_ENTRY
));
1365 if (mBtsSupported
) {
1371 Replace INT1 exception handler to restore page table to absent/execute-disable state
1372 in order to trigger page fault again to save SMM profile data..
1382 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_DEBUG
, DebugExceptionHandler
);
1383 ASSERT_EFI_ERROR (Status
);