4 Copyright (c) 2012 - 2017, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
20 UINT32 mSmmProfileCr3
;
22 SMM_PROFILE_HEADER
*mSmmProfileBase
;
23 MSR_DS_AREA_STRUCT
*mMsrDsAreaBase
;
25 // The buffer to store SMM profile data.
27 UINTN mSmmProfileSize
;
30 // The buffer to enable branch trace store.
32 UINTN mMsrDsAreaSize
= SMM_PROFILE_DTS_SIZE
;
35 // The flag indicates if execute-disable is enabled on processor.
37 BOOLEAN mXdEnabled
= FALSE
;
40 // The flag indicates if BTS is supported by processor.
42 BOOLEAN mBtsSupported
= TRUE
;
45 // The flag indicates if SMM profile starts to record data.
47 BOOLEAN mSmmProfileStart
= FALSE
;
50 // Record the page fault exception count for one instruction execution.
54 UINT64 (*mLastPFEntryValue
)[MAX_PF_ENTRY_COUNT
];
55 UINT64
*(*mLastPFEntryPointer
)[MAX_PF_ENTRY_COUNT
];
57 MSR_DS_AREA_STRUCT
**mMsrDsArea
;
58 BRANCH_TRACE_RECORD
**mMsrBTSRecord
;
59 UINTN mBTSRecordNumber
;
60 PEBS_RECORD
**mMsrPEBSRecord
;
63 // These memory ranges are always present; they do not generate the access type of page fault exception,
64 // but they possibly generate instruction fetch type of page fault exception.
66 MEMORY_PROTECTION_RANGE
*mProtectionMemRange
= NULL
;
67 UINTN mProtectionMemRangeCount
= 0;
70 // Some predefined memory ranges.
72 MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate
[] = {
74 // SMRAM range (to be fixed in runtime).
75 // It is always present and instruction fetches are allowed.
77 {{0x00000000, 0x00000000},TRUE
,FALSE
},
80 // SMM profile data range( to be fixed in runtime).
81 // It is always present and instruction fetches are not allowed.
83 {{0x00000000, 0x00000000},TRUE
,TRUE
},
86 // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
87 // It is always present and instruction fetches are allowed.
88 // {{0x00000000, 0x00000000},TRUE,FALSE},
92 // Future extended range could be added here.
96 // PCI MMIO ranges (to be added in runtime).
97 // They are always present and instruction fetches are not allowed.
102 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
104 MEMORY_RANGE
*mSplitMemRange
= NULL
;
105 UINTN mSplitMemRangeCount
= 0;
110 UINT32 mSmiCommandPort
;
113 Disable branch trace store.
121 AsmMsrAnd64 (MSR_DEBUG_CTL
, ~((UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
)));
125 Enable branch trace store.
133 AsmMsrOr64 (MSR_DEBUG_CTL
, (MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
));
137 Get CPU Index from APIC ID.
148 ApicId
= GetApicId ();
150 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
151 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== ApicId
) {
160 Get the source of IP after execute-disable exception is triggered.
162 @param CpuIndex The index of CPU.
163 @param DestinationIP The destination address.
167 GetSourceFromDestinationOnBts (
172 BRANCH_TRACE_RECORD
*CurrentBTSRecord
;
178 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)mMsrDsArea
[CpuIndex
]->BTSIndex
;
179 for (Index
= 0; Index
< mBTSRecordNumber
; Index
++) {
180 if ((UINTN
)CurrentBTSRecord
< (UINTN
)mMsrBTSRecord
[CpuIndex
]) {
184 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[CpuIndex
]->BTSAbsoluteMaximum
- 1);
187 if (CurrentBTSRecord
->LastBranchTo
== DestinationIP
) {
189 // Good! find 1st one, then find 2nd one.
193 // The first one is DEBUG exception
198 // Good find proper one.
200 return CurrentBTSRecord
->LastBranchFrom
;
210 SMM profile specific INT 1 (single-step) exception handler.
212 @param InterruptType Defines the type of interrupt or exception that
213 occurred on the processor.This parameter is processor architecture specific.
214 @param SystemContext A pointer to the processor context when
215 the interrupt occurred on the processor.
219 DebugExceptionHandler (
220 IN EFI_EXCEPTION_TYPE InterruptType
,
221 IN EFI_SYSTEM_CONTEXT SystemContext
227 if (!mSmmProfileStart
) {
230 CpuIndex
= GetCpuIndex ();
233 // Clear last PF entries
235 for (PFEntry
= 0; PFEntry
< mPFEntryCount
[CpuIndex
]; PFEntry
++) {
236 *mLastPFEntryPointer
[CpuIndex
][PFEntry
] = mLastPFEntryValue
[CpuIndex
][PFEntry
];
240 // Reset page fault exception count for next page fault.
242 mPFEntryCount
[CpuIndex
] = 0;
250 // Clear TF in EFLAGS
252 ClearTrapFlag (SystemContext
);
256 Check if the input address is in SMM ranges.
258 @param[in] Address The input address.
260 @retval TRUE The input address is in SMM.
261 @retval FALSE The input address is not in SMM.
265 IN EFI_PHYSICAL_ADDRESS Address
270 if ((Address
>= mCpuHotPlugData
.SmrrBase
) && (Address
< mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
273 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
274 if (Address
>= mSmmCpuSmramRanges
[Index
].CpuStart
&&
275 Address
< mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) {
283 Check if the memory address will be mapped by 4KB-page.
285 @param Address The address of Memory.
286 @param Nx The flag indicates if the memory is execute-disable.
291 IN EFI_PHYSICAL_ADDRESS Address
,
297 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
299 // Check configuration
301 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
302 if ((Address
>= mProtectionMemRange
[Index
].Range
.Base
) && (Address
< mProtectionMemRange
[Index
].Range
.Top
)) {
303 *Nx
= mProtectionMemRange
[Index
].Nx
;
304 return mProtectionMemRange
[Index
].Present
;
312 if (IsInSmmRanges (Address
)) {
320 Check if the memory address will be mapped by 4KB-page.
322 @param Address The address of Memory.
327 IN EFI_PHYSICAL_ADDRESS Address
332 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
334 // Check configuration
336 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
337 if ((Address
>= mSplitMemRange
[Index
].Base
) && (Address
< mSplitMemRange
[Index
].Top
)) {
342 if (Address
< mCpuHotPlugData
.SmrrBase
) {
343 if ((mCpuHotPlugData
.SmrrBase
- Address
) < BASE_2MB
) {
346 } else if (Address
> (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) {
347 if ((Address
- (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) < BASE_2MB
) {
359 Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
363 InitProtectedMemRange (
368 UINTN NumberOfDescriptors
;
369 UINTN NumberOfAddedDescriptors
;
370 UINTN NumberOfProtectRange
;
371 UINTN NumberOfSpliteRange
;
372 EFI_GCD_MEMORY_SPACE_DESCRIPTOR
*MemorySpaceMap
;
374 EFI_PHYSICAL_ADDRESS ProtectBaseAddress
;
375 EFI_PHYSICAL_ADDRESS ProtectEndAddress
;
376 EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress
;
377 EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress
;
378 UINT64 High4KBPageSize
;
379 UINT64 Low4KBPageSize
;
381 NumberOfDescriptors
= 0;
382 NumberOfAddedDescriptors
= mSmmCpuSmramRangeCount
;
383 NumberOfSpliteRange
= 0;
384 MemorySpaceMap
= NULL
;
387 // Get MMIO ranges from GCD and add them into protected memory ranges.
389 gDS
->GetMemorySpaceMap (
390 &NumberOfDescriptors
,
393 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
394 if (MemorySpaceMap
[Index
].GcdMemoryType
== EfiGcdMemoryTypeMemoryMappedIo
) {
395 NumberOfAddedDescriptors
++;
399 if (NumberOfAddedDescriptors
!= 0) {
400 TotalSize
= NumberOfAddedDescriptors
* sizeof (MEMORY_PROTECTION_RANGE
) + sizeof (mProtectionMemRangeTemplate
);
401 mProtectionMemRange
= (MEMORY_PROTECTION_RANGE
*) AllocateZeroPool (TotalSize
);
402 ASSERT (mProtectionMemRange
!= NULL
);
403 mProtectionMemRangeCount
= TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
);
406 // Copy existing ranges.
408 CopyMem (mProtectionMemRange
, mProtectionMemRangeTemplate
, sizeof (mProtectionMemRangeTemplate
));
411 // Create split ranges which come from protected ranges.
413 TotalSize
= (TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
)) * sizeof (MEMORY_RANGE
);
414 mSplitMemRange
= (MEMORY_RANGE
*) AllocateZeroPool (TotalSize
);
415 ASSERT (mSplitMemRange
!= NULL
);
418 // Create SMM ranges which are set to present and execution-enable.
420 NumberOfProtectRange
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
421 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
422 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= mProtectionMemRange
[0].Range
.Base
&&
423 mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
< mProtectionMemRange
[0].Range
.Top
) {
425 // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
429 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= mSmmCpuSmramRanges
[Index
].CpuStart
;
430 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
;
431 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
432 mProtectionMemRange
[NumberOfProtectRange
].Nx
= FALSE
;
433 NumberOfProtectRange
++;
437 // Create MMIO ranges which are set to present and execution-disable.
439 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
440 if (MemorySpaceMap
[Index
].GcdMemoryType
!= EfiGcdMemoryTypeMemoryMappedIo
) {
443 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= MemorySpaceMap
[Index
].BaseAddress
;
444 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= MemorySpaceMap
[Index
].BaseAddress
+ MemorySpaceMap
[Index
].Length
;
445 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
446 mProtectionMemRange
[NumberOfProtectRange
].Nx
= TRUE
;
447 NumberOfProtectRange
++;
451 // Check and updated actual protected memory ranges count
453 ASSERT (NumberOfProtectRange
<= mProtectionMemRangeCount
);
454 mProtectionMemRangeCount
= NumberOfProtectRange
;
458 // According to protected ranges, create the ranges which will be mapped by 4KB page.
460 NumberOfSpliteRange
= 0;
461 NumberOfProtectRange
= mProtectionMemRangeCount
;
462 for (Index
= 0; Index
< NumberOfProtectRange
; Index
++) {
464 // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
466 ProtectBaseAddress
= mProtectionMemRange
[Index
].Range
.Base
;
467 ProtectEndAddress
= mProtectionMemRange
[Index
].Range
.Top
;
468 if (((ProtectBaseAddress
& (SIZE_2MB
- 1)) != 0) || ((ProtectEndAddress
& (SIZE_2MB
- 1)) != 0)) {
470 // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
471 // A mix of 4KB and 2MB page could save SMRAM space.
473 Top2MBAlignedAddress
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
474 Base2MBAlignedAddress
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
475 if ((Top2MBAlignedAddress
> Base2MBAlignedAddress
) &&
476 ((Top2MBAlignedAddress
- Base2MBAlignedAddress
) >= SIZE_2MB
)) {
478 // There is an range which could be mapped by 2MB-page.
480 High4KBPageSize
= ((ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectEndAddress
& ~(SIZE_2MB
- 1));
481 Low4KBPageSize
= ((ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectBaseAddress
& ~(SIZE_2MB
- 1));
482 if (High4KBPageSize
!= 0) {
484 // Add not 2MB-aligned range to be mapped by 4KB-page.
486 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
487 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
488 NumberOfSpliteRange
++;
490 if (Low4KBPageSize
!= 0) {
492 // Add not 2MB-aligned range to be mapped by 4KB-page.
494 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
495 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
496 NumberOfSpliteRange
++;
500 // The range could only be mapped by 4KB-page.
502 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
503 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
504 NumberOfSpliteRange
++;
509 mSplitMemRangeCount
= NumberOfSpliteRange
;
511 DEBUG ((EFI_D_INFO
, "SMM Profile Memory Ranges:\n"));
512 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
513 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Base = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Base
));
514 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Top = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Top
));
516 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
517 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Base = %lx\n", Index
, mSplitMemRange
[Index
].Base
));
518 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Top = %lx\n", Index
, mSplitMemRange
[Index
].Top
));
523 Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
540 UINTN NumberOfPdpEntries
;
541 UINTN NumberOfPml4Entries
;
542 UINTN SizeOfMemorySpace
;
545 if (sizeof (UINTN
) == sizeof (UINT64
)) {
546 Pml4
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
547 SizeOfMemorySpace
= HighBitSet64 (gPhyMask
) + 1;
549 // Calculate the table entries of PML4E and PDPTE.
551 if (SizeOfMemorySpace
<= 39 ) {
552 NumberOfPml4Entries
= 1;
553 NumberOfPdpEntries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 30));
555 NumberOfPml4Entries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 39));
556 NumberOfPdpEntries
= 512;
559 NumberOfPml4Entries
= 1;
560 NumberOfPdpEntries
= 4;
564 // Go through page table and change 2MB-page into 4KB-page.
566 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
567 if (sizeof (UINTN
) == sizeof (UINT64
)) {
568 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
570 // If Pml4 entry does not exist, skip it
574 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
576 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
578 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
579 if ((*Pde
& IA32_PG_P
) == 0) {
581 // If PDE entry does not exist, skip it
585 if ((*Pde
& IA32_PG_PS
) != 0) {
587 // This is 1G entry, skip it
591 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
595 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
596 if ((*Pte
& IA32_PG_P
) == 0) {
598 // If PTE entry does not exist, skip it
602 Address
= (((Level2
<< 9) + Level3
) << 21);
605 // If it is 2M page, check IsAddressSplit()
607 if (((*Pte
& IA32_PG_PS
) != 0) && IsAddressSplit (Address
)) {
609 // Based on current page table, create 4KB page table for split area.
611 ASSERT (Address
== (*Pte
& PHYSICAL_ADDRESS_MASK
));
613 Pt
= AllocatePageTableMemory (1);
617 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++) {
618 Pt
[Level4
] = Address
+ ((Level4
<< 12) | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
620 *Pte
= (UINT64
)(UINTN
)Pt
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
621 } // end if IsAddressSplit
627 // Go through page table and set several page table entries to absent or execute-disable.
629 DEBUG ((EFI_D_INFO
, "Patch page table start ...\n"));
630 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
631 if (sizeof (UINTN
) == sizeof (UINT64
)) {
632 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
634 // If Pml4 entry does not exist, skip it
638 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
640 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
642 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
643 if ((*Pde
& IA32_PG_P
) == 0) {
645 // If PDE entry does not exist, skip it
649 if ((*Pde
& IA32_PG_PS
) != 0) {
651 // This is 1G entry, set NX bit and skip it
654 *Pde
= *Pde
| IA32_PG_NX
;
658 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
662 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
663 if ((*Pte
& IA32_PG_P
) == 0) {
665 // If PTE entry does not exist, skip it
669 Address
= (((Level2
<< 9) + Level3
) << 21);
671 if ((*Pte
& IA32_PG_PS
) != 0) {
674 if (!IsAddressValid (Address
, &Nx
)) {
676 // Patch to remove Present flag and RW flag
678 *Pte
= *Pte
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
680 if (Nx
&& mXdSupported
) {
681 *Pte
= *Pte
| IA32_PG_NX
;
685 Pt
= (UINT64
*)(UINTN
)(*Pte
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
689 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++, Pt
++) {
690 if (!IsAddressValid (Address
, &Nx
)) {
691 *Pt
= *Pt
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
693 if (Nx
&& mXdSupported
) {
694 *Pt
= *Pt
| IA32_PG_NX
;
707 DEBUG ((EFI_D_INFO
, "Patch page table done!\n"));
709 // Set execute-disable flag
717 To find FADT in ACPI tables.
719 @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable.
721 @return FADT table pointer.
723 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
724 FindAcpiFadtTableByAcpiGuid (
725 IN EFI_GUID
*AcpiTableGuid
728 EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER
*Rsdp
;
729 EFI_ACPI_DESCRIPTION_HEADER
*Rsdt
;
730 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
737 // Find the ACPI table RSD_PTR from the system configuration table.
739 for (Index
= 0; Index
< gST
->NumberOfTableEntries
; Index
++) {
740 if (CompareGuid (&(gST
->ConfigurationTable
[Index
].VendorGuid
), AcpiTableGuid
)) {
742 // A match was found.
744 Rsdp
= gST
->ConfigurationTable
[Index
].VendorTable
;
753 Rsdt
= (EFI_ACPI_DESCRIPTION_HEADER
*)(UINTN
) Rsdp
->RsdtAddress
;
754 if (Rsdt
== NULL
|| Rsdt
->Signature
!= EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE
) {
758 for (Index
= sizeof (EFI_ACPI_DESCRIPTION_HEADER
); Index
< Rsdt
->Length
; Index
= Index
+ sizeof (UINT32
)) {
760 Data32
= *(UINT32
*) ((UINT8
*) Rsdt
+ Index
);
761 Fadt
= (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*) (UINT32
*) (UINTN
) Data32
;
762 if (Fadt
->Header
.Signature
== EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
767 if (Fadt
== NULL
|| Fadt
->Header
.Signature
!= EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
) {
775 To find FADT in ACPI tables.
777 @return FADT table pointer.
779 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*
784 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
786 Fadt
= FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid
);
791 return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid
);
795 To get system port address of the SMI Command Port in FADT table.
803 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
805 Fadt
= FindAcpiFadtTable ();
806 ASSERT (Fadt
!= NULL
);
808 mSmiCommandPort
= Fadt
->SmiCmd
;
809 DEBUG ((EFI_D_INFO
, "mSmiCommandPort = %x\n", mSmiCommandPort
));
813 Updates page table to make some memory ranges (like system memory) absent
814 and make some memory ranges (like MMIO) present and execute disable. It also
815 update 2MB-page to 4KB-page for some memory ranges.
824 // The flag indicates SMM profile starts to work.
826 mSmmProfileStart
= TRUE
;
830 Initialize SMM profile in SmmReadyToLock protocol callback function.
832 @param Protocol Points to the protocol's unique identifier.
833 @param Interface Points to the interface instance.
834 @param Handle The handle on which the interface was installed.
836 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
840 InitSmmProfileCallBack (
841 IN CONST EFI_GUID
*Protocol
,
847 // Save to variable so that SMM profile data can be found.
852 EFI_VARIABLE_BOOTSERVICE_ACCESS
| EFI_VARIABLE_RUNTIME_ACCESS
,
853 sizeof(mSmmProfileBase
),
858 // Get Software SMI from FADT
860 GetSmiCommandPort ();
863 // Initialize protected memory range for patching page table later.
865 InitProtectedMemRange ();
871 Initialize SMM profile data structures.
875 InitSmmProfileInternal (
880 EFI_PHYSICAL_ADDRESS Base
;
883 UINTN MsrDsAreaSizePerCpu
;
886 mPFEntryCount
= (UINTN
*)AllocateZeroPool (sizeof (UINTN
) * mMaxNumberOfCpus
);
887 ASSERT (mPFEntryCount
!= NULL
);
888 mLastPFEntryValue
= (UINT64 (*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
889 sizeof (mLastPFEntryValue
[0]) * mMaxNumberOfCpus
);
890 ASSERT (mLastPFEntryValue
!= NULL
);
891 mLastPFEntryPointer
= (UINT64
*(*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
892 sizeof (mLastPFEntryPointer
[0]) * mMaxNumberOfCpus
);
893 ASSERT (mLastPFEntryPointer
!= NULL
);
896 // Allocate memory for SmmProfile below 4GB.
899 mSmmProfileSize
= PcdGet32 (PcdCpuSmmProfileSize
);
900 ASSERT ((mSmmProfileSize
& 0xFFF) == 0);
903 TotalSize
= mSmmProfileSize
+ mMsrDsAreaSize
;
905 TotalSize
= mSmmProfileSize
;
909 Status
= gBS
->AllocatePages (
911 EfiReservedMemoryType
,
912 EFI_SIZE_TO_PAGES (TotalSize
),
915 ASSERT_EFI_ERROR (Status
);
916 ZeroMem ((VOID
*)(UINTN
)Base
, TotalSize
);
917 mSmmProfileBase
= (SMM_PROFILE_HEADER
*)(UINTN
)Base
;
920 // Initialize SMM profile data header.
922 mSmmProfileBase
->HeaderSize
= sizeof (SMM_PROFILE_HEADER
);
923 mSmmProfileBase
->MaxDataEntries
= (UINT64
)((mSmmProfileSize
- sizeof(SMM_PROFILE_HEADER
)) / sizeof (SMM_PROFILE_ENTRY
));
924 mSmmProfileBase
->MaxDataSize
= MultU64x64 (mSmmProfileBase
->MaxDataEntries
, sizeof(SMM_PROFILE_ENTRY
));
925 mSmmProfileBase
->CurDataEntries
= 0;
926 mSmmProfileBase
->CurDataSize
= 0;
927 mSmmProfileBase
->TsegStart
= mCpuHotPlugData
.SmrrBase
;
928 mSmmProfileBase
->TsegSize
= mCpuHotPlugData
.SmrrSize
;
929 mSmmProfileBase
->NumSmis
= 0;
930 mSmmProfileBase
->NumCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
933 mMsrDsArea
= (MSR_DS_AREA_STRUCT
**)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT
*) * mMaxNumberOfCpus
);
934 ASSERT (mMsrDsArea
!= NULL
);
935 mMsrBTSRecord
= (BRANCH_TRACE_RECORD
**)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD
*) * mMaxNumberOfCpus
);
936 ASSERT (mMsrBTSRecord
!= NULL
);
937 mMsrPEBSRecord
= (PEBS_RECORD
**)AllocateZeroPool (sizeof (PEBS_RECORD
*) * mMaxNumberOfCpus
);
938 ASSERT (mMsrPEBSRecord
!= NULL
);
940 mMsrDsAreaBase
= (MSR_DS_AREA_STRUCT
*)((UINTN
)Base
+ mSmmProfileSize
);
941 MsrDsAreaSizePerCpu
= mMsrDsAreaSize
/ mMaxNumberOfCpus
;
942 mBTSRecordNumber
= (MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
- sizeof(MSR_DS_AREA_STRUCT
)) / sizeof(BRANCH_TRACE_RECORD
);
943 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
944 mMsrDsArea
[Index
] = (MSR_DS_AREA_STRUCT
*)((UINTN
)mMsrDsAreaBase
+ MsrDsAreaSizePerCpu
* Index
);
945 mMsrBTSRecord
[Index
] = (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + sizeof(MSR_DS_AREA_STRUCT
));
946 mMsrPEBSRecord
[Index
] = (PEBS_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
);
948 mMsrDsArea
[Index
]->BTSBufferBase
= (UINTN
)mMsrBTSRecord
[Index
];
949 mMsrDsArea
[Index
]->BTSIndex
= mMsrDsArea
[Index
]->BTSBufferBase
;
950 mMsrDsArea
[Index
]->BTSAbsoluteMaximum
= mMsrDsArea
[Index
]->BTSBufferBase
+ mBTSRecordNumber
* sizeof(BRANCH_TRACE_RECORD
) + 1;
951 mMsrDsArea
[Index
]->BTSInterruptThreshold
= mMsrDsArea
[Index
]->BTSAbsoluteMaximum
+ 1;
953 mMsrDsArea
[Index
]->PEBSBufferBase
= (UINTN
)mMsrPEBSRecord
[Index
];
954 mMsrDsArea
[Index
]->PEBSIndex
= mMsrDsArea
[Index
]->PEBSBufferBase
;
955 mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
= mMsrDsArea
[Index
]->PEBSBufferBase
+ PEBS_RECORD_NUMBER
* sizeof(PEBS_RECORD
) + 1;
956 mMsrDsArea
[Index
]->PEBSInterruptThreshold
= mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
+ 1;
960 mProtectionMemRange
= mProtectionMemRangeTemplate
;
961 mProtectionMemRangeCount
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
964 // Update TSeg entry.
966 mProtectionMemRange
[0].Range
.Base
= mCpuHotPlugData
.SmrrBase
;
967 mProtectionMemRange
[0].Range
.Top
= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
;
970 // Update SMM profile entry.
972 mProtectionMemRange
[1].Range
.Base
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
;
973 mProtectionMemRange
[1].Range
.Top
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
+ TotalSize
;
976 // Allocate memory reserved for creating 4KB pages.
978 InitPagesForPFHandler ();
981 // Start SMM profile when SmmReadyToLock protocol is installed.
983 Status
= gSmst
->SmmRegisterProtocolNotify (
984 &gEfiSmmReadyToLockProtocolGuid
,
985 InitSmmProfileCallBack
,
988 ASSERT_EFI_ERROR (Status
);
994 Check if XD feature is supported by a processor.
998 CheckFeatureSupported (
1004 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr
;
1007 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
1008 if (RegEax
<= CPUID_EXTENDED_FUNCTION
) {
1010 // Extended CPUID functions are not supported on this processor.
1012 mXdSupported
= FALSE
;
1015 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
1016 if ((RegEdx
& CPUID1_EDX_XD_SUPPORT
) == 0) {
1018 // Execute Disable Bit feature is not supported on this processor.
1020 mXdSupported
= FALSE
;
1024 if (mBtsSupported
) {
1025 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
);
1026 if ((RegEdx
& CPUID1_EDX_BTS_AVAILABLE
) != 0) {
1028 // Per IA32 manuals:
1029 // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
1030 // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
1031 // availability of the BTS facilities, including the ability to set the BTS and
1032 // BTINT bits in the MSR_DEBUGCTLA MSR.
1033 // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
1035 MiscEnableMsr
.Uint64
= AsmReadMsr64 (MSR_IA32_MISC_ENABLE
);
1036 if (MiscEnableMsr
.Bits
.BTS
== 1) {
1038 // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
1040 mBtsSupported
= FALSE
;
1051 ActivateSingleStepDB (
1057 Dr6
= AsmReadDr6 ();
1058 if ((Dr6
& DR6_SINGLE_STEP
) != 0) {
1061 Dr6
|= DR6_SINGLE_STEP
;
1076 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1077 if ((DebugCtl
& MSR_DEBUG_CTL_LBR
) != 0) {
1080 DebugCtl
|= MSR_DEBUG_CTL_LBR
;
1081 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1085 Enable branch trace store.
1087 @param CpuIndex The index of the processor.
1097 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1098 if ((DebugCtl
& MSR_DEBUG_CTL_BTS
) != 0) {
1102 AsmWriteMsr64 (MSR_DS_AREA
, (UINT64
)(UINTN
)mMsrDsArea
[CpuIndex
]);
1103 DebugCtl
|= (UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
);
1104 DebugCtl
&= ~((UINT64
)MSR_DEBUG_CTL_BTINT
);
1105 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1109 Increase SMI number in each SMI entry.
1113 SmmProfileRecordSmiNum (
1117 if (mSmmProfileStart
) {
1118 mSmmProfileBase
->NumSmis
++;
1123 Initialize processor environment for SMM profile.
1125 @param CpuIndex The index of the processor.
1129 ActivateSmmProfile (
1134 // Enable Single Step DB#
1136 ActivateSingleStepDB ();
1138 if (mBtsSupported
) {
1140 // We can not get useful information from LER, so we have to use BTS.
1147 ActivateBTS (CpuIndex
);
1152 Initialize SMM profile in SMM CPU entry point.
1154 @param[in] Cr3 The base address of the page tables to use in SMM.
1165 mSmmProfileCr3
= Cr3
;
1168 // Skip SMM profile initialization if feature is disabled
1170 if (!FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
1175 // Initialize SmmProfile here
1177 InitSmmProfileInternal ();
1180 // Initialize profile IDT.
1186 Update page table to map the memory correctly in order to make the instruction
1187 which caused page fault execute successfully. And it also save the original page
1188 table to be restored in single-step exception.
1190 @param PageTable PageTable Address.
1191 @param PFAddress The memory address which caused page fault exception.
1192 @param CpuIndex The index of the processor.
1193 @param ErrorCode The Error code of exception.
1197 RestorePageTableBelow4G (
1210 if (sizeof(UINT64
) == sizeof(UINTN
)) {
1211 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 39, 47);
1212 ASSERT (PageTable
[PTIndex
] != 0);
1213 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1219 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 30, 38);
1220 ASSERT (PageTable
[PTIndex
] != 0);
1221 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1226 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 21, 29);
1227 if ((PageTable
[PTIndex
] & IA32_PG_PS
) != 0) {
1233 // Record old entries with non-present status
1234 // Old entries include the memory which instruction is at and the memory which instruction access.
1237 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1238 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1239 PFIndex
= mPFEntryCount
[CpuIndex
];
1240 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1241 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1242 mPFEntryCount
[CpuIndex
]++;
1248 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 21) - 1));
1249 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_PS
;
1250 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1251 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1252 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1258 ASSERT (PageTable
[PTIndex
] != 0);
1259 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1264 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 12, 20);
1267 // Record old entries with non-present status
1268 // Old entries include the memory which instruction is at and the memory which instruction access.
1271 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1272 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1273 PFIndex
= mPFEntryCount
[CpuIndex
];
1274 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1275 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1276 mPFEntryCount
[CpuIndex
]++;
1282 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 12) - 1));
1283 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1284 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1285 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1291 The Page fault handler to save SMM profile data.
1293 @param Rip The RIP when exception happens.
1294 @param ErrorCode The Error code of exception.
1298 SmmProfilePFHandler (
1307 UINT64 InstructionAddress
;
1308 UINTN MaxEntryNumber
;
1309 UINTN CurrentEntryNumber
;
1310 BOOLEAN IsValidPFAddress
;
1311 SMM_PROFILE_ENTRY
*SmmProfileEntry
;
1315 EFI_SMM_SAVE_STATE_IO_INFO IoInfo
;
1317 if (!mSmmProfileStart
) {
1319 // If SMM profile does not start, call original page fault handler.
1321 SmiDefaultPFHandler ();
1325 if (mBtsSupported
) {
1329 IsValidPFAddress
= FALSE
;
1330 PageTable
= (UINT64
*)AsmReadCr3 ();
1331 PFAddress
= AsmReadCr2 ();
1332 CpuIndex
= GetCpuIndex ();
1334 if (PFAddress
<= 0xFFFFFFFF) {
1335 RestorePageTableBelow4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
);
1337 RestorePageTableAbove4G (PageTable
, PFAddress
, CpuIndex
, ErrorCode
, &IsValidPFAddress
);
1340 if (!IsValidPFAddress
) {
1341 InstructionAddress
= Rip
;
1342 if ((ErrorCode
& IA32_PF_EC_ID
) != 0 && (mBtsSupported
)) {
1344 // If it is instruction fetch failure, get the correct IP from BTS.
1346 InstructionAddress
= GetSourceFromDestinationOnBts (CpuIndex
, Rip
);
1347 if (InstructionAddress
== 0) {
1349 // It indicates the instruction which caused page fault is not a jump instruction,
1350 // set instruction address same as the page fault address.
1352 InstructionAddress
= PFAddress
;
1357 // Indicate it is not software SMI
1359 SmiCommand
= 0xFFFFFFFFFFFFFFFFULL
;
1360 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1361 Status
= SmmReadSaveState(&mSmmCpu
, sizeof(IoInfo
), EFI_SMM_SAVE_STATE_REGISTER_IO
, Index
, &IoInfo
);
1362 if (EFI_ERROR (Status
)) {
1365 if (IoInfo
.IoPort
== mSmiCommandPort
) {
1367 // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1369 SoftSmiValue
= IoRead8 (mSmiCommandPort
);
1370 SmiCommand
= (UINT64
)SoftSmiValue
;
1375 SmmProfileEntry
= (SMM_PROFILE_ENTRY
*)(UINTN
)(mSmmProfileBase
+ 1);
1377 // Check if there is already a same entry in profile data.
1379 for (Index
= 0; Index
< (UINTN
) mSmmProfileBase
->CurDataEntries
; Index
++) {
1380 if ((SmmProfileEntry
[Index
].ErrorCode
== (UINT64
)ErrorCode
) &&
1381 (SmmProfileEntry
[Index
].Address
== PFAddress
) &&
1382 (SmmProfileEntry
[Index
].CpuNum
== (UINT64
)CpuIndex
) &&
1383 (SmmProfileEntry
[Index
].Instruction
== InstructionAddress
) &&
1384 (SmmProfileEntry
[Index
].SmiCmd
== SmiCommand
)) {
1386 // Same record exist, need not save again.
1391 if (Index
== mSmmProfileBase
->CurDataEntries
) {
1392 CurrentEntryNumber
= (UINTN
) mSmmProfileBase
->CurDataEntries
;
1393 MaxEntryNumber
= (UINTN
) mSmmProfileBase
->MaxDataEntries
;
1394 if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer
)) {
1395 CurrentEntryNumber
= CurrentEntryNumber
% MaxEntryNumber
;
1397 if (CurrentEntryNumber
< MaxEntryNumber
) {
1399 // Log the new entry
1401 SmmProfileEntry
[CurrentEntryNumber
].SmiNum
= mSmmProfileBase
->NumSmis
;
1402 SmmProfileEntry
[CurrentEntryNumber
].ErrorCode
= (UINT64
)ErrorCode
;
1403 SmmProfileEntry
[CurrentEntryNumber
].ApicId
= (UINT64
)GetApicId ();
1404 SmmProfileEntry
[CurrentEntryNumber
].CpuNum
= (UINT64
)CpuIndex
;
1405 SmmProfileEntry
[CurrentEntryNumber
].Address
= PFAddress
;
1406 SmmProfileEntry
[CurrentEntryNumber
].Instruction
= InstructionAddress
;
1407 SmmProfileEntry
[CurrentEntryNumber
].SmiCmd
= SmiCommand
;
1409 // Update current entry index and data size in the header.
1411 mSmmProfileBase
->CurDataEntries
++;
1412 mSmmProfileBase
->CurDataSize
= MultU64x64 (mSmmProfileBase
->CurDataEntries
, sizeof (SMM_PROFILE_ENTRY
));
1421 if (mBtsSupported
) {
1427 Replace INT1 exception handler to restore page table to absent/execute-disable state
1428 in order to trigger page fault again to save SMM profile data.
1438 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_DEBUG
, DebugExceptionHandler
);
1439 ASSERT_EFI_ERROR (Status
);