/** @file
Enable SMM profile.

Copyright (c) 2012 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
20 UINT32 mSmmProfileCr3
;
22 SMM_PROFILE_HEADER
*mSmmProfileBase
;
23 MSR_DS_AREA_STRUCT
*mMsrDsAreaBase
;
25 // The buffer to store SMM profile data.
27 UINTN mSmmProfileSize
;
30 // The buffer to enable branch trace store.
32 UINTN mMsrDsAreaSize
= SMM_PROFILE_DTS_SIZE
;
35 // The flag indicates if execute-disable is enabled on processor.
37 BOOLEAN mXdEnabled
= FALSE
;
40 // The flag indicates if BTS is supported by processor.
42 BOOLEAN mBtsSupported
= TRUE
;
45 // The flag indicates if SMM profile starts to record data.
47 BOOLEAN mSmmProfileStart
= FALSE
;
50 // Record the page fault exception count for one instruction execution.
54 UINT64 (*mLastPFEntryValue
)[MAX_PF_ENTRY_COUNT
];
55 UINT64
*(*mLastPFEntryPointer
)[MAX_PF_ENTRY_COUNT
];
57 MSR_DS_AREA_STRUCT
**mMsrDsArea
;
58 BRANCH_TRACE_RECORD
**mMsrBTSRecord
;
59 UINTN mBTSRecordNumber
;
60 PEBS_RECORD
**mMsrPEBSRecord
;
63 // These memory ranges are always present, they does not generate the access type of page fault exception,
64 // but they possibly generate instruction fetch type of page fault exception.
66 MEMORY_PROTECTION_RANGE
*mProtectionMemRange
= NULL
;
67 UINTN mProtectionMemRangeCount
= 0;
70 // Some predefined memory ranges.
72 MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate
[] = {
74 // SMRAM range (to be fixed in runtime).
75 // It is always present and instruction fetches are allowed.
77 {{0x00000000, 0x00000000},TRUE
,FALSE
},
80 // SMM profile data range( to be fixed in runtime).
81 // It is always present and instruction fetches are not allowed.
83 {{0x00000000, 0x00000000},TRUE
,TRUE
},
86 // Future extended range could be added here.
90 // PCI MMIO ranges (to be added in runtime).
91 // They are always present and instruction fetches are not allowed.
96 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
98 MEMORY_RANGE
*mSplitMemRange
= NULL
;
99 UINTN mSplitMemRangeCount
= 0;
104 UINT32 mSmiCommandPort
;
107 Disable branch trace store.
115 AsmMsrAnd64 (MSR_DEBUG_CTL
, ~((UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
)));
119 Enable branch trace store.
127 AsmMsrOr64 (MSR_DEBUG_CTL
, (MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
));
131 Get CPU Index from APIC ID.
142 ApicId
= GetApicId ();
144 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
145 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== ApicId
) {
154 Get the source of IP after execute-disable exception is triggered.
156 @param CpuIndex The index of CPU.
157 @param DestinationIP The destination address.
161 GetSourceFromDestinationOnBts (
166 BRANCH_TRACE_RECORD
*CurrentBTSRecord
;
172 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)mMsrDsArea
[CpuIndex
]->BTSIndex
;
173 for (Index
= 0; Index
< mBTSRecordNumber
; Index
++) {
174 if ((UINTN
)CurrentBTSRecord
< (UINTN
)mMsrBTSRecord
[CpuIndex
]) {
178 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[CpuIndex
]->BTSAbsoluteMaximum
- 1);
181 if (CurrentBTSRecord
->LastBranchTo
== DestinationIP
) {
183 // Good! find 1st one, then find 2nd one.
187 // The first one is DEBUG exception
192 // Good find proper one.
194 return CurrentBTSRecord
->LastBranchFrom
;
204 SMM profile specific INT 1 (single-step) exception handler.
206 @param InterruptType Defines the type of interrupt or exception that
207 occurred on the processor.This parameter is processor architecture specific.
208 @param SystemContext A pointer to the processor context when
209 the interrupt occurred on the processor.
213 DebugExceptionHandler (
214 IN EFI_EXCEPTION_TYPE InterruptType
,
215 IN EFI_SYSTEM_CONTEXT SystemContext
221 if (!mSmmProfileStart
) {
224 CpuIndex
= GetCpuIndex ();
227 // Clear last PF entries
229 for (PFEntry
= 0; PFEntry
< mPFEntryCount
[CpuIndex
]; PFEntry
++) {
230 *mLastPFEntryPointer
[CpuIndex
][PFEntry
] = mLastPFEntryValue
[CpuIndex
][PFEntry
];
234 // Reset page fault exception count for next page fault.
236 mPFEntryCount
[CpuIndex
] = 0;
244 // Clear TF in EFLAGS
246 ClearTrapFlag (SystemContext
);
250 Check if the input address is in SMM ranges.
252 @param[in] Address The input address.
254 @retval TRUE The input address is in SMM.
255 @retval FALSE The input address is not in SMM.
259 IN EFI_PHYSICAL_ADDRESS Address
264 if ((Address
< mCpuHotPlugData
.SmrrBase
) || (Address
>= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
267 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
268 if (Address
>= mSmmCpuSmramRanges
[Index
].CpuStart
&&
269 Address
< mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) {
277 Check if the memory address will be mapped by 4KB-page.
279 @param Address The address of Memory.
280 @param Nx The flag indicates if the memory is execute-disable.
285 IN EFI_PHYSICAL_ADDRESS Address
,
291 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
293 // Check configuration
295 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
296 if ((Address
>= mProtectionMemRange
[Index
].Range
.Base
) && (Address
< mProtectionMemRange
[Index
].Range
.Top
)) {
297 *Nx
= mProtectionMemRange
[Index
].Nx
;
298 return mProtectionMemRange
[Index
].Present
;
306 if (IsInSmmRanges (Address
)) {
314 Check if the memory address will be mapped by 4KB-page.
316 @param Address The address of Memory.
321 IN EFI_PHYSICAL_ADDRESS Address
326 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
328 // Check configuration
330 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
331 if ((Address
>= mSplitMemRange
[Index
].Base
) && (Address
< mSplitMemRange
[Index
].Top
)) {
336 if (Address
< mCpuHotPlugData
.SmrrBase
) {
337 if ((mCpuHotPlugData
.SmrrBase
- Address
) < BASE_2MB
) {
340 } else if (Address
> (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) {
341 if ((Address
- (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) < BASE_2MB
) {
353 Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
357 InitProtectedMemRange (
362 UINTN NumberOfDescriptors
;
363 UINTN NumberOfMmioDescriptors
;
364 UINTN NumberOfProtectRange
;
365 UINTN NumberOfSpliteRange
;
366 EFI_GCD_MEMORY_SPACE_DESCRIPTOR
*MemorySpaceMap
;
368 EFI_PHYSICAL_ADDRESS ProtectBaseAddress
;
369 EFI_PHYSICAL_ADDRESS ProtectEndAddress
;
370 EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress
;
371 EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress
;
372 UINT64 High4KBPageSize
;
373 UINT64 Low4KBPageSize
;
375 NumberOfDescriptors
= 0;
376 NumberOfMmioDescriptors
= 0;
377 NumberOfSpliteRange
= 0;
378 MemorySpaceMap
= NULL
;
381 // Get MMIO ranges from GCD and add them into protected memory ranges.
383 gDS
->GetMemorySpaceMap (
384 &NumberOfDescriptors
,
387 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
388 if (MemorySpaceMap
[Index
].GcdMemoryType
== EfiGcdMemoryTypeMemoryMappedIo
) {
389 NumberOfMmioDescriptors
++;
393 if (NumberOfMmioDescriptors
!= 0) {
394 TotalSize
= NumberOfMmioDescriptors
* sizeof (MEMORY_PROTECTION_RANGE
) + sizeof (mProtectionMemRangeTemplate
);
395 mProtectionMemRange
= (MEMORY_PROTECTION_RANGE
*) AllocateZeroPool (TotalSize
);
396 ASSERT (mProtectionMemRange
!= NULL
);
397 mProtectionMemRangeCount
= TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
);
400 // Copy existing ranges.
402 CopyMem (mProtectionMemRange
, mProtectionMemRangeTemplate
, sizeof (mProtectionMemRangeTemplate
));
405 // Create split ranges which come from protected ranges.
407 TotalSize
= (TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
)) * sizeof (MEMORY_RANGE
);
408 mSplitMemRange
= (MEMORY_RANGE
*) AllocateZeroPool (TotalSize
);
409 ASSERT (mSplitMemRange
!= NULL
);
412 // Create MMIO ranges which are set to present and execution-disable.
414 NumberOfProtectRange
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
415 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
416 if (MemorySpaceMap
[Index
].GcdMemoryType
!= EfiGcdMemoryTypeMemoryMappedIo
) {
419 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= MemorySpaceMap
[Index
].BaseAddress
;
420 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= MemorySpaceMap
[Index
].BaseAddress
+ MemorySpaceMap
[Index
].Length
;
421 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
422 mProtectionMemRange
[NumberOfProtectRange
].Nx
= TRUE
;
423 NumberOfProtectRange
++;
428 // According to protected ranges, create the ranges which will be mapped by 2KB page.
430 NumberOfSpliteRange
= 0;
431 NumberOfProtectRange
= mProtectionMemRangeCount
;
432 for (Index
= 0; Index
< NumberOfProtectRange
; Index
++) {
434 // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
436 ProtectBaseAddress
= mProtectionMemRange
[Index
].Range
.Base
;
437 ProtectEndAddress
= mProtectionMemRange
[Index
].Range
.Top
;
438 if (((ProtectBaseAddress
& (SIZE_2MB
- 1)) != 0) || ((ProtectEndAddress
& (SIZE_2MB
- 1)) != 0)) {
440 // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
441 // A mix of 4KB and 2MB page could save SMRAM space.
443 Top2MBAlignedAddress
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
444 Base2MBAlignedAddress
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
445 if ((Top2MBAlignedAddress
> Base2MBAlignedAddress
) &&
446 ((Top2MBAlignedAddress
- Base2MBAlignedAddress
) >= SIZE_2MB
)) {
448 // There is an range which could be mapped by 2MB-page.
450 High4KBPageSize
= ((ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectEndAddress
& ~(SIZE_2MB
- 1));
451 Low4KBPageSize
= ((ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectBaseAddress
& ~(SIZE_2MB
- 1));
452 if (High4KBPageSize
!= 0) {
454 // Add not 2MB-aligned range to be mapped by 4KB-page.
456 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
457 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
458 NumberOfSpliteRange
++;
460 if (Low4KBPageSize
!= 0) {
462 // Add not 2MB-aligned range to be mapped by 4KB-page.
464 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
465 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
466 NumberOfSpliteRange
++;
470 // The range could only be mapped by 4KB-page.
472 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
473 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
474 NumberOfSpliteRange
++;
479 mSplitMemRangeCount
= NumberOfSpliteRange
;
481 DEBUG ((EFI_D_INFO
, "SMM Profile Memory Ranges:\n"));
482 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
483 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Base = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Base
));
484 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Top = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Top
));
486 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
487 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Base = %lx\n", Index
, mSplitMemRange
[Index
].Base
));
488 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Top = %lx\n", Index
, mSplitMemRange
[Index
].Top
));
493 Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
510 UINTN NumberOfPdpEntries
;
511 UINTN NumberOfPml4Entries
;
512 UINTN SizeOfMemorySpace
;
515 if (sizeof (UINTN
) == sizeof (UINT64
)) {
516 Pml4
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
517 SizeOfMemorySpace
= HighBitSet64 (gPhyMask
) + 1;
519 // Calculate the table entries of PML4E and PDPTE.
521 if (SizeOfMemorySpace
<= 39 ) {
522 NumberOfPml4Entries
= 1;
523 NumberOfPdpEntries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 30));
525 NumberOfPml4Entries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 39));
526 NumberOfPdpEntries
= 512;
529 NumberOfPml4Entries
= 1;
530 NumberOfPdpEntries
= 4;
534 // Go through page table and change 2MB-page into 4KB-page.
536 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
537 if (sizeof (UINTN
) == sizeof (UINT64
)) {
538 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
540 // If Pml4 entry does not exist, skip it
544 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
546 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
548 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
549 if ((*Pde
& IA32_PG_P
) == 0) {
551 // If PDE entry does not exist, skip it
555 if ((*Pde
& IA32_PG_PS
) != 0) {
557 // This is 1G entry, skip it
561 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
565 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
566 if ((*Pte
& IA32_PG_P
) == 0) {
568 // If PTE entry does not exist, skip it
572 Address
= (((Level2
<< 9) + Level3
) << 21);
575 // If it is 2M page, check IsAddressSplit()
577 if (((*Pte
& IA32_PG_PS
) != 0) && IsAddressSplit (Address
)) {
579 // Based on current page table, create 4KB page table for split area.
581 ASSERT (Address
== (*Pte
& PHYSICAL_ADDRESS_MASK
));
583 Pt
= AllocatePageTableMemory (1);
587 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++) {
588 Pt
[Level4
] = Address
+ ((Level4
<< 12) | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
590 *Pte
= (UINT64
)(UINTN
)Pt
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
591 } // end if IsAddressSplit
597 // Go through page table and set several page table entries to absent or execute-disable.
599 DEBUG ((EFI_D_INFO
, "Patch page table start ...\n"));
600 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
601 if (sizeof (UINTN
) == sizeof (UINT64
)) {
602 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
604 // If Pml4 entry does not exist, skip it
608 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
610 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
612 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
613 if ((*Pde
& IA32_PG_P
) == 0) {
615 // If PDE entry does not exist, skip it
619 if ((*Pde
& IA32_PG_PS
) != 0) {
621 // This is 1G entry, set NX bit and skip it
624 *Pde
= *Pde
| IA32_PG_NX
;
628 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
632 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
633 if ((*Pte
& IA32_PG_P
) == 0) {
635 // If PTE entry does not exist, skip it
639 Address
= (((Level2
<< 9) + Level3
) << 21);
641 if ((*Pte
& IA32_PG_PS
) != 0) {
644 if (!IsAddressValid (Address
, &Nx
)) {
646 // Patch to remove Present flag and RW flag
648 *Pte
= *Pte
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
650 if (Nx
&& mXdSupported
) {
651 *Pte
= *Pte
| IA32_PG_NX
;
655 Pt
= (UINT64
*)(UINTN
)(*Pte
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
659 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++, Pt
++) {
660 if (!IsAddressValid (Address
, &Nx
)) {
661 *Pt
= *Pt
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
663 if (Nx
&& mXdSupported
) {
664 *Pt
= *Pt
| IA32_PG_NX
;
677 DEBUG ((EFI_D_INFO
, "Patch page table done!\n"));
679 // Set execute-disable flag
687 To find FADT in ACPI tables.
689 @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable.
691 @return FADT table pointer.
693 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
694 FindAcpiFadtTableByAcpiGuid (
695 IN EFI_GUID
*AcpiTableGuid
698 EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER
*Rsdp
;
699 EFI_ACPI_DESCRIPTION_HEADER
*Rsdt
;
700 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
707 // found ACPI table RSD_PTR from system table
709 for (Index
= 0; Index
< gST
->NumberOfTableEntries
; Index
++) {
710 if (CompareGuid (&(gST
->ConfigurationTable
[Index
].VendorGuid
), AcpiTableGuid
)) {
712 // A match was found.
714 Rsdp
= gST
->ConfigurationTable
[Index
].VendorTable
;
723 Rsdt
= (EFI_ACPI_DESCRIPTION_HEADER
*)(UINTN
) Rsdp
->RsdtAddress
;
724 if (Rsdt
== NULL
|| Rsdt
->Signature
!= EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE
) {
728 for (Index
= sizeof (EFI_ACPI_DESCRIPTION_HEADER
); Index
< Rsdt
->Length
; Index
= Index
+ sizeof (UINT32
)) {
730 Data32
= *(UINT32
*) ((UINT8
*) Rsdt
+ Index
);
731 Fadt
= (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*) (UINT32
*) (UINTN
) Data32
;
732 if (Fadt
->Header
.Signature
== EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
737 if (Fadt
== NULL
|| Fadt
->Header
.Signature
!= EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
745 To find FADT in ACPI tables.
747 @return FADT table pointer.
749 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
754 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
756 Fadt
= FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid
);
761 return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid
);
765 To get system port address of the SMI Command Port in FADT table.
773 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
775 Fadt
= FindAcpiFadtTable ();
776 ASSERT (Fadt
!= NULL
);
778 mSmiCommandPort
= Fadt
->SmiCmd
;
779 DEBUG ((EFI_D_INFO
, "mSmiCommandPort = %x\n", mSmiCommandPort
));
783 Updates page table to make some memory ranges (like system memory) absent
784 and make some memory ranges (like MMIO) present and execute disable. It also
785 update 2MB-page to 4KB-page for some memory ranges.
794 // The flag indicates SMM profile starts to work.
796 mSmmProfileStart
= TRUE
;
800 Initialize SMM profile in SmmReadyToLock protocol callback function.
802 @param Protocol Points to the protocol's unique identifier.
803 @param Interface Points to the interface instance.
804 @param Handle The handle on which the interface was installed.
806 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
810 InitSmmProfileCallBack (
811 IN CONST EFI_GUID
*Protocol
,
817 // Save to variable so that SMM profile data can be found.
822 EFI_VARIABLE_BOOTSERVICE_ACCESS
| EFI_VARIABLE_RUNTIME_ACCESS
,
823 sizeof(mSmmProfileBase
),
828 // Get Software SMI from FADT
830 GetSmiCommandPort ();
833 // Initialize protected memory range for patching page table later.
835 InitProtectedMemRange ();
841 Initialize SMM profile data structures.
845 InitSmmProfileInternal (
850 EFI_PHYSICAL_ADDRESS Base
;
853 UINTN MsrDsAreaSizePerCpu
;
856 mPFEntryCount
= (UINTN
*)AllocateZeroPool (sizeof (UINTN
) * mMaxNumberOfCpus
);
857 ASSERT (mPFEntryCount
!= NULL
);
858 mLastPFEntryValue
= (UINT64 (*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
859 sizeof (mLastPFEntryValue
[0]) * mMaxNumberOfCpus
);
860 ASSERT (mLastPFEntryValue
!= NULL
);
861 mLastPFEntryPointer
= (UINT64
*(*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
862 sizeof (mLastPFEntryPointer
[0]) * mMaxNumberOfCpus
);
863 ASSERT (mLastPFEntryPointer
!= NULL
);
866 // Allocate memory for SmmProfile below 4GB.
869 mSmmProfileSize
= PcdGet32 (PcdCpuSmmProfileSize
);
870 ASSERT ((mSmmProfileSize
& 0xFFF) == 0);
873 TotalSize
= mSmmProfileSize
+ mMsrDsAreaSize
;
875 TotalSize
= mSmmProfileSize
;
879 Status
= gBS
->AllocatePages (
881 EfiReservedMemoryType
,
882 EFI_SIZE_TO_PAGES (TotalSize
),
885 ASSERT_EFI_ERROR (Status
);
886 ZeroMem ((VOID
*)(UINTN
)Base
, TotalSize
);
887 mSmmProfileBase
= (SMM_PROFILE_HEADER
*)(UINTN
)Base
;
890 // Initialize SMM profile data header.
892 mSmmProfileBase
->HeaderSize
= sizeof (SMM_PROFILE_HEADER
);
893 mSmmProfileBase
->MaxDataEntries
= (UINT64
)((mSmmProfileSize
- sizeof(SMM_PROFILE_HEADER
)) / sizeof (SMM_PROFILE_ENTRY
));
894 mSmmProfileBase
->MaxDataSize
= MultU64x64 (mSmmProfileBase
->MaxDataEntries
, sizeof(SMM_PROFILE_ENTRY
));
895 mSmmProfileBase
->CurDataEntries
= 0;
896 mSmmProfileBase
->CurDataSize
= 0;
897 mSmmProfileBase
->TsegStart
= mCpuHotPlugData
.SmrrBase
;
898 mSmmProfileBase
->TsegSize
= mCpuHotPlugData
.SmrrSize
;
899 mSmmProfileBase
->NumSmis
= 0;
900 mSmmProfileBase
->NumCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
903 mMsrDsArea
= (MSR_DS_AREA_STRUCT
**)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT
*) * mMaxNumberOfCpus
);
904 ASSERT (mMsrDsArea
!= NULL
);
905 mMsrBTSRecord
= (BRANCH_TRACE_RECORD
**)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD
*) * mMaxNumberOfCpus
);
906 ASSERT (mMsrBTSRecord
!= NULL
);
907 mMsrPEBSRecord
= (PEBS_RECORD
**)AllocateZeroPool (sizeof (PEBS_RECORD
*) * mMaxNumberOfCpus
);
908 ASSERT (mMsrPEBSRecord
!= NULL
);
910 mMsrDsAreaBase
= (MSR_DS_AREA_STRUCT
*)((UINTN
)Base
+ mSmmProfileSize
);
911 MsrDsAreaSizePerCpu
= mMsrDsAreaSize
/ mMaxNumberOfCpus
;
912 mBTSRecordNumber
= (MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
- sizeof(MSR_DS_AREA_STRUCT
)) / sizeof(BRANCH_TRACE_RECORD
);
913 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
914 mMsrDsArea
[Index
] = (MSR_DS_AREA_STRUCT
*)((UINTN
)mMsrDsAreaBase
+ MsrDsAreaSizePerCpu
* Index
);
915 mMsrBTSRecord
[Index
] = (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + sizeof(MSR_DS_AREA_STRUCT
));
916 mMsrPEBSRecord
[Index
] = (PEBS_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
);
918 mMsrDsArea
[Index
]->BTSBufferBase
= (UINTN
)mMsrBTSRecord
[Index
];
919 mMsrDsArea
[Index
]->BTSIndex
= mMsrDsArea
[Index
]->BTSBufferBase
;
920 mMsrDsArea
[Index
]->BTSAbsoluteMaximum
= mMsrDsArea
[Index
]->BTSBufferBase
+ mBTSRecordNumber
* sizeof(BRANCH_TRACE_RECORD
) + 1;
921 mMsrDsArea
[Index
]->BTSInterruptThreshold
= mMsrDsArea
[Index
]->BTSAbsoluteMaximum
+ 1;
923 mMsrDsArea
[Index
]->PEBSBufferBase
= (UINTN
)mMsrPEBSRecord
[Index
];
924 mMsrDsArea
[Index
]->PEBSIndex
= mMsrDsArea
[Index
]->PEBSBufferBase
;
925 mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
= mMsrDsArea
[Index
]->PEBSBufferBase
+ PEBS_RECORD_NUMBER
* sizeof(PEBS_RECORD
) + 1;
926 mMsrDsArea
[Index
]->PEBSInterruptThreshold
= mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
+ 1;
930 mProtectionMemRange
= mProtectionMemRangeTemplate
;
931 mProtectionMemRangeCount
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
934 // Update TSeg entry.
936 mProtectionMemRange
[0].Range
.Base
= mCpuHotPlugData
.SmrrBase
;
937 mProtectionMemRange
[0].Range
.Top
= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
;
940 // Update SMM profile entry.
942 mProtectionMemRange
[1].Range
.Base
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
;
943 mProtectionMemRange
[1].Range
.Top
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
+ TotalSize
;
946 // Allocate memory reserved for creating 4KB pages.
948 InitPagesForPFHandler ();
951 // Start SMM profile when SmmReadyToLock protocol is installed.
953 Status
= gSmst
->SmmRegisterProtocolNotify (
954 &gEfiSmmReadyToLockProtocolGuid
,
955 InitSmmProfileCallBack
,
958 ASSERT_EFI_ERROR (Status
);
964 Check if XD feature is supported by a processor.
968 CheckFeatureSupported (
974 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr
;
977 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
978 if (RegEax
<= CPUID_EXTENDED_FUNCTION
) {
980 // Extended CPUID functions are not supported on this processor.
982 mXdSupported
= FALSE
;
985 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
986 if ((RegEdx
& CPUID1_EDX_XD_SUPPORT
) == 0) {
988 // Execute Disable Bit feature is not supported on this processor.
990 mXdSupported
= FALSE
;
995 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
);
996 if ((RegEdx
& CPUID1_EDX_BTS_AVAILABLE
) != 0) {
999 // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
1000 // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
1001 // availability of the BTS facilities, including the ability to set the BTS and
1002 // BTINT bits in the MSR_DEBUGCTLA MSR.
1003 // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
1005 MiscEnableMsr
.Uint64
= AsmReadMsr64 (MSR_IA32_MISC_ENABLE
);
1006 if (MiscEnableMsr
.Bits
.BTS
== 1) {
1008 // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
1010 mBtsSupported
= FALSE
;
1021 ActivateSingleStepDB (
1027 Dr6
= AsmReadDr6 ();
1028 if ((Dr6
& DR6_SINGLE_STEP
) != 0) {
1031 Dr6
|= DR6_SINGLE_STEP
;
1046 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1047 if ((DebugCtl
& MSR_DEBUG_CTL_LBR
) != 0) {
1050 DebugCtl
|= MSR_DEBUG_CTL_LBR
;
1051 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1055 Enable branch trace store.
1057 @param CpuIndex The index of the processor.
1067 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1068 if ((DebugCtl
& MSR_DEBUG_CTL_BTS
) != 0) {
1072 AsmWriteMsr64 (MSR_DS_AREA
, (UINT64
)(UINTN
)mMsrDsArea
[CpuIndex
]);
1073 DebugCtl
|= (UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
);
1074 DebugCtl
&= ~((UINT64
)MSR_DEBUG_CTL_BTINT
);
1075 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1079 Increase SMI number in each SMI entry.
1083 SmmProfileRecordSmiNum (
1087 if (mSmmProfileStart
) {
1088 mSmmProfileBase
->NumSmis
++;
1093 Initialize processor environment for SMM profile.
1095 @param CpuIndex The index of the processor.
1099 ActivateSmmProfile (
1104 // Enable Single Step DB#
1106 ActivateSingleStepDB ();
1108 if (mBtsSupported
) {
1110 // We can not get useful information from LER, so we have to use BTS.
1117 ActivateBTS (CpuIndex
);
1122 Initialize SMM profile in SMM CPU entry point.
1124 @param[in] Cr3 The base address of the page tables to use in SMM.
1135 mSmmProfileCr3
= Cr3
;
1138 // Skip SMM profile initialization if feature is disabled
1140 if (!FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1145 // Initialize SmmProfile here
1147 InitSmmProfileInternal ();
1150 // Initialize profile IDT.
1156 Update page table to map the memory correctly in order to make the instruction
1157 which caused page fault execute successfully. And it also save the original page
1158 table to be restored in single-step exception.
1160 @param PageTable PageTable Address.
1161 @param PFAddress The memory address which caused page fault exception.
1162 @param CpuIndex The index of the processor.
1163 @param ErrorCode The Error code of exception.
1167 RestorePageTableBelow4G (
1180 if (sizeof(UINT64
) == sizeof(UINTN
)) {
1181 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 39, 47);
1182 ASSERT (PageTable
[PTIndex
] != 0);
1183 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1189 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 30, 38);
1190 ASSERT (PageTable
[PTIndex
] != 0);
1191 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1196 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 21, 29);
1197 if ((PageTable
[PTIndex
] & IA32_PG_PS
) != 0) {
1203 // Record old entries with non-present status
1204 // Old entries include the memory which instruction is at and the memory which instruction access.
1207 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1208 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1209 PFIndex
= mPFEntryCount
[CpuIndex
];
1210 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1211 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1212 mPFEntryCount
[CpuIndex
]++;
1218 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 21) - 1));
1219 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_PS
;
1220 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1221 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1222 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1228 ASSERT (PageTable
[PTIndex
] != 0);
1229 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1234 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 12, 20);
1237 // Record old entries with non-present status
1238 // Old entries include the memory which instruction is at and the memory which instruction access.
1241 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1242 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1243 PFIndex
= mPFEntryCount
[CpuIndex
];
1244 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1245 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1246 mPFEntryCount
[CpuIndex
]++;
1252 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 12) - 1));
1253 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1254 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1255 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1261 The Page fault handler to save SMM profile data.
1263 @param Rip The RIP when exception happens.
1264 @param ErrorCode The Error code of exception.
1268 SmmProfilePFHandler (
1277 UINT64 InstructionAddress
;
1278 UINTN MaxEntryNumber
;
1279 UINTN CurrentEntryNumber
;
1280 BOOLEAN IsValidPFAddress
;
1281 SMM_PROFILE_ENTRY
*SmmProfileEntry
;
1285 EFI_SMM_SAVE_STATE_IO_INFO IoInfo
;
1287 if (!mSmmProfileStart
) {
1289 // If SMM profile does not start, call original page fault handler.
1291 SmiDefaultPFHandler ();
1295 if (mBtsSupported
) {
1299 IsValidPFAddress
= FALSE
;
1300 PageTable
= (UINT64
*)AsmReadCr3 ();
1301 PFAddress
= AsmReadCr2 ();
1302 CpuIndex
= GetCpuIndex ();
1304 if (PFAddress
<= 0xFFFFFFFF) {
1305 RestorePageTableBelow4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
);
1307 RestorePageTableAbove4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
, &IsValidPFAddress
);
1310 if (!IsValidPFAddress
) {
1311 InstructionAddress
= Rip
;
1312 if ((ErrorCode
& IA32_PF_EC_ID
) != 0 && (mBtsSupported
)) {
1314 // If it is instruction fetch failure, get the correct IP from BTS.
1316 InstructionAddress
= GetSourceFromDestinationOnBts (CpuIndex
, Rip
);
1317 if (InstructionAddress
== 0) {
1319 // It indicates the instruction which caused page fault is not a jump instruction,
1320 // set instruction address same as the page fault address.
1322 InstructionAddress
= PFAddress
;
1327 // Indicate it is not software SMI
1329 SmiCommand
= 0xFFFFFFFFFFFFFFFFULL
;
1330 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1331 Status
= SmmReadSaveState(&mSmmCpu
, sizeof(IoInfo
), EFI_SMM_SAVE_STATE_REGISTER_IO
, Index
, &IoInfo
);
1332 if (EFI_ERROR (Status
)) {
1335 if (IoInfo
.IoPort
== mSmiCommandPort
) {
1337 // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1339 SoftSmiValue
= IoRead8 (mSmiCommandPort
);
1340 SmiCommand
= (UINT64
)SoftSmiValue
;
1345 SmmProfileEntry
= (SMM_PROFILE_ENTRY
*)(UINTN
)(mSmmProfileBase
+ 1);
1347 // Check if there is already a same entry in profile data.
1349 for (Index
= 0; Index
< (UINTN
) mSmmProfileBase
->CurDataEntries
; Index
++) {
1350 if ((SmmProfileEntry
[Index
].ErrorCode
== (UINT64
)ErrorCode
) &&
1351 (SmmProfileEntry
[Index
].Address
== PFAddress
) &&
1352 (SmmProfileEntry
[Index
].CpuNum
== (UINT64
)CpuIndex
) &&
1353 (SmmProfileEntry
[Index
].Instruction
== InstructionAddress
) &&
1354 (SmmProfileEntry
[Index
].SmiCmd
== SmiCommand
)) {
1356 // Same record exist, need not save again.
1361 if (Index
== mSmmProfileBase
->CurDataEntries
) {
1362 CurrentEntryNumber
= (UINTN
) mSmmProfileBase
->CurDataEntries
;
1363 MaxEntryNumber
= (UINTN
) mSmmProfileBase
->MaxDataEntries
;
1364 if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer
)) {
1365 CurrentEntryNumber
= CurrentEntryNumber
% MaxEntryNumber
;
1367 if (CurrentEntryNumber
< MaxEntryNumber
) {
1369 // Log the new entry
1371 SmmProfileEntry
[CurrentEntryNumber
].SmiNum
= mSmmProfileBase
->NumSmis
;
1372 SmmProfileEntry
[CurrentEntryNumber
].ErrorCode
= (UINT64
)ErrorCode
;
1373 SmmProfileEntry
[CurrentEntryNumber
].ApicId
= (UINT64
)GetApicId ();
1374 SmmProfileEntry
[CurrentEntryNumber
].CpuNum
= (UINT64
)CpuIndex
;
1375 SmmProfileEntry
[CurrentEntryNumber
].Address
= PFAddress
;
1376 SmmProfileEntry
[CurrentEntryNumber
].Instruction
= InstructionAddress
;
1377 SmmProfileEntry
[CurrentEntryNumber
].SmiCmd
= SmiCommand
;
1379 // Update current entry index and data size in the header.
1381 mSmmProfileBase
->CurDataEntries
++;
1382 mSmmProfileBase
->CurDataSize
= MultU64x64 (mSmmProfileBase
->CurDataEntries
, sizeof (SMM_PROFILE_ENTRY
));
1391 if (mBtsSupported
) {
1397 Replace INT1 exception handler to restore page table to absent/execute-disable state
1398 in order to trigger page fault again to save SMM profile data..
1408 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_DEBUG
, DebugExceptionHandler
);
1409 ASSERT_EFI_ERROR (Status
);