/** @file
Enable SMM profile.

Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
20 UINT32 mSmmProfileCr3
;
22 SMM_PROFILE_HEADER
*mSmmProfileBase
;
23 MSR_DS_AREA_STRUCT
*mMsrDsAreaBase
;
25 // The buffer to store SMM profile data.
27 UINTN mSmmProfileSize
;
30 // The buffer to enable branch trace store.
32 UINTN mMsrDsAreaSize
= SMM_PROFILE_DTS_SIZE
;
35 // The flag indicates if execute-disable is supported by processor.
37 BOOLEAN mXdSupported
= TRUE
;
40 // The flag indicates if execute-disable is enabled on processor.
42 BOOLEAN mXdEnabled
= FALSE
;
45 // The flag indicates if BTS is supported by processor.
47 BOOLEAN mBtsSupported
= TRUE
;
50 // The flag indicates if SMM profile starts to record data.
52 BOOLEAN mSmmProfileStart
= FALSE
;
55 // The flag indicates if #DB will be setup in #PF handler.
57 BOOLEAN mSetupDebugTrap
= FALSE
;
60 // Record the page fault exception count for one instruction execution.
64 UINT64 (*mLastPFEntryValue
)[MAX_PF_ENTRY_COUNT
];
65 UINT64
*(*mLastPFEntryPointer
)[MAX_PF_ENTRY_COUNT
];
67 MSR_DS_AREA_STRUCT
**mMsrDsArea
;
68 BRANCH_TRACE_RECORD
**mMsrBTSRecord
;
69 UINTN mBTSRecordNumber
;
70 PEBS_RECORD
**mMsrPEBSRecord
;
73 // These memory ranges are always present, they does not generate the access type of page fault exception,
74 // but they possibly generate instruction fetch type of page fault exception.
76 MEMORY_PROTECTION_RANGE
*mProtectionMemRange
= NULL
;
77 UINTN mProtectionMemRangeCount
= 0;
80 // Some predefined memory ranges.
82 MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate
[] = {
84 // SMRAM range (to be fixed in runtime).
85 // It is always present and instruction fetches are allowed.
87 {{0x00000000, 0x00000000},TRUE
,FALSE
},
90 // SMM profile data range( to be fixed in runtime).
91 // It is always present and instruction fetches are not allowed.
93 {{0x00000000, 0x00000000},TRUE
,TRUE
},
96 // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz (to be fixed in runtime).
97 // It is always present and instruction fetches are allowed.
98 // {{0x00000000, 0x00000000},TRUE,FALSE},
102 // Future extended range could be added here.
106 // PCI MMIO ranges (to be added in runtime).
107 // They are always present and instruction fetches are not allowed.
112 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
114 MEMORY_RANGE
*mSplitMemRange
= NULL
;
115 UINTN mSplitMemRangeCount
= 0;
120 UINT32 mSmiCommandPort
;
123 Disable branch trace store.
131 AsmMsrAnd64 (MSR_DEBUG_CTL
, ~((UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
)));
135 Enable branch trace store.
143 AsmMsrOr64 (MSR_DEBUG_CTL
, (MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
));
147 Get CPU Index from APIC ID.
158 ApicId
= GetApicId ();
160 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
161 if (gSmmCpuPrivate
->ProcessorInfo
[Index
].ProcessorId
== ApicId
) {
170 Get the source of IP after execute-disable exception is triggered.
172 @param CpuIndex The index of CPU.
173 @param DestinationIP The destination address.
177 GetSourceFromDestinationOnBts (
182 BRANCH_TRACE_RECORD
*CurrentBTSRecord
;
188 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)mMsrDsArea
[CpuIndex
]->BTSIndex
;
189 for (Index
= 0; Index
< mBTSRecordNumber
; Index
++) {
190 if ((UINTN
)CurrentBTSRecord
< (UINTN
)mMsrBTSRecord
[CpuIndex
]) {
194 CurrentBTSRecord
= (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[CpuIndex
]->BTSAbsoluteMaximum
- 1);
197 if (CurrentBTSRecord
->LastBranchTo
== DestinationIP
) {
199 // Good! find 1st one, then find 2nd one.
203 // The first one is DEBUG exception
208 // Good find proper one.
210 return CurrentBTSRecord
->LastBranchFrom
;
220 SMM profile specific INT 1 (single-step) exception handler.
222 @param InterruptType Defines the type of interrupt or exception that
223 occurred on the processor.This parameter is processor architecture specific.
224 @param SystemContext A pointer to the processor context when
225 the interrupt occurred on the processor.
229 DebugExceptionHandler (
230 IN EFI_EXCEPTION_TYPE InterruptType
,
231 IN EFI_SYSTEM_CONTEXT SystemContext
237 if (!mSmmProfileStart
&&
238 !HEAP_GUARD_NONSTOP_MODE
&&
239 !NULL_DETECTION_NONSTOP_MODE
) {
242 CpuIndex
= GetCpuIndex ();
245 // Clear last PF entries
247 for (PFEntry
= 0; PFEntry
< mPFEntryCount
[CpuIndex
]; PFEntry
++) {
248 *mLastPFEntryPointer
[CpuIndex
][PFEntry
] = mLastPFEntryValue
[CpuIndex
][PFEntry
];
252 // Reset page fault exception count for next page fault.
254 mPFEntryCount
[CpuIndex
] = 0;
262 // Clear TF in EFLAGS
264 ClearTrapFlag (SystemContext
);
268 Check if the input address is in SMM ranges.
270 @param[in] Address The input address.
272 @retval TRUE The input address is in SMM.
273 @retval FALSE The input address is not in SMM.
277 IN EFI_PHYSICAL_ADDRESS Address
282 if ((Address
>= mCpuHotPlugData
.SmrrBase
) && (Address
< mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
)) {
285 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
286 if (Address
>= mSmmCpuSmramRanges
[Index
].CpuStart
&&
287 Address
< mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
) {
295 Check if the memory address will be mapped by 4KB-page.
297 @param Address The address of Memory.
298 @param Nx The flag indicates if the memory is execute-disable.
303 IN EFI_PHYSICAL_ADDRESS Address
,
309 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
311 // Check configuration
313 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
314 if ((Address
>= mProtectionMemRange
[Index
].Range
.Base
) && (Address
< mProtectionMemRange
[Index
].Range
.Top
)) {
315 *Nx
= mProtectionMemRange
[Index
].Nx
;
316 return mProtectionMemRange
[Index
].Present
;
324 if (IsInSmmRanges (Address
)) {
332 Check if the memory address will be mapped by 4KB-page.
334 @param Address The address of Memory.
339 IN EFI_PHYSICAL_ADDRESS Address
344 if (FeaturePcdGet (PcdCpuSmmProfileEnable
)) {
346 // Check configuration
348 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
349 if ((Address
>= mSplitMemRange
[Index
].Base
) && (Address
< mSplitMemRange
[Index
].Top
)) {
354 if (Address
< mCpuHotPlugData
.SmrrBase
) {
355 if ((mCpuHotPlugData
.SmrrBase
- Address
) < BASE_2MB
) {
358 } else if (Address
> (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) {
359 if ((Address
- (mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
- BASE_2MB
)) < BASE_2MB
) {
371 Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
375 InitProtectedMemRange (
380 UINTN NumberOfDescriptors
;
381 UINTN NumberOfAddedDescriptors
;
382 UINTN NumberOfProtectRange
;
383 UINTN NumberOfSpliteRange
;
384 EFI_GCD_MEMORY_SPACE_DESCRIPTOR
*MemorySpaceMap
;
386 EFI_PHYSICAL_ADDRESS ProtectBaseAddress
;
387 EFI_PHYSICAL_ADDRESS ProtectEndAddress
;
388 EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress
;
389 EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress
;
390 UINT64 High4KBPageSize
;
391 UINT64 Low4KBPageSize
;
393 NumberOfDescriptors
= 0;
394 NumberOfAddedDescriptors
= mSmmCpuSmramRangeCount
;
395 NumberOfSpliteRange
= 0;
396 MemorySpaceMap
= NULL
;
399 // Get MMIO ranges from GCD and add them into protected memory ranges.
401 gDS
->GetMemorySpaceMap (
402 &NumberOfDescriptors
,
405 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
406 if (MemorySpaceMap
[Index
].GcdMemoryType
== EfiGcdMemoryTypeMemoryMappedIo
) {
407 NumberOfAddedDescriptors
++;
411 if (NumberOfAddedDescriptors
!= 0) {
412 TotalSize
= NumberOfAddedDescriptors
* sizeof (MEMORY_PROTECTION_RANGE
) + sizeof (mProtectionMemRangeTemplate
);
413 mProtectionMemRange
= (MEMORY_PROTECTION_RANGE
*) AllocateZeroPool (TotalSize
);
414 ASSERT (mProtectionMemRange
!= NULL
);
415 mProtectionMemRangeCount
= TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
);
418 // Copy existing ranges.
420 CopyMem (mProtectionMemRange
, mProtectionMemRangeTemplate
, sizeof (mProtectionMemRangeTemplate
));
423 // Create split ranges which come from protected ranges.
425 TotalSize
= (TotalSize
/ sizeof (MEMORY_PROTECTION_RANGE
)) * sizeof (MEMORY_RANGE
);
426 mSplitMemRange
= (MEMORY_RANGE
*) AllocateZeroPool (TotalSize
);
427 ASSERT (mSplitMemRange
!= NULL
);
430 // Create SMM ranges which are set to present and execution-enable.
432 NumberOfProtectRange
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
433 for (Index
= 0; Index
< mSmmCpuSmramRangeCount
; Index
++) {
434 if (mSmmCpuSmramRanges
[Index
].CpuStart
>= mProtectionMemRange
[0].Range
.Base
&&
435 mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
< mProtectionMemRange
[0].Range
.Top
) {
437 // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz
441 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= mSmmCpuSmramRanges
[Index
].CpuStart
;
442 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= mSmmCpuSmramRanges
[Index
].CpuStart
+ mSmmCpuSmramRanges
[Index
].PhysicalSize
;
443 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
444 mProtectionMemRange
[NumberOfProtectRange
].Nx
= FALSE
;
445 NumberOfProtectRange
++;
449 // Create MMIO ranges which are set to present and execution-disable.
451 for (Index
= 0; Index
< NumberOfDescriptors
; Index
++) {
452 if (MemorySpaceMap
[Index
].GcdMemoryType
!= EfiGcdMemoryTypeMemoryMappedIo
) {
455 mProtectionMemRange
[NumberOfProtectRange
].Range
.Base
= MemorySpaceMap
[Index
].BaseAddress
;
456 mProtectionMemRange
[NumberOfProtectRange
].Range
.Top
= MemorySpaceMap
[Index
].BaseAddress
+ MemorySpaceMap
[Index
].Length
;
457 mProtectionMemRange
[NumberOfProtectRange
].Present
= TRUE
;
458 mProtectionMemRange
[NumberOfProtectRange
].Nx
= TRUE
;
459 NumberOfProtectRange
++;
463 // Check and updated actual protected memory ranges count
465 ASSERT (NumberOfProtectRange
<= mProtectionMemRangeCount
);
466 mProtectionMemRangeCount
= NumberOfProtectRange
;
470 // According to protected ranges, create the ranges which will be mapped by 2KB page.
472 NumberOfSpliteRange
= 0;
473 NumberOfProtectRange
= mProtectionMemRangeCount
;
474 for (Index
= 0; Index
< NumberOfProtectRange
; Index
++) {
476 // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
478 ProtectBaseAddress
= mProtectionMemRange
[Index
].Range
.Base
;
479 ProtectEndAddress
= mProtectionMemRange
[Index
].Range
.Top
;
480 if (((ProtectBaseAddress
& (SIZE_2MB
- 1)) != 0) || ((ProtectEndAddress
& (SIZE_2MB
- 1)) != 0)) {
482 // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
483 // A mix of 4KB and 2MB page could save SMRAM space.
485 Top2MBAlignedAddress
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
486 Base2MBAlignedAddress
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
487 if ((Top2MBAlignedAddress
> Base2MBAlignedAddress
) &&
488 ((Top2MBAlignedAddress
- Base2MBAlignedAddress
) >= SIZE_2MB
)) {
490 // There is an range which could be mapped by 2MB-page.
492 High4KBPageSize
= ((ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectEndAddress
& ~(SIZE_2MB
- 1));
493 Low4KBPageSize
= ((ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1)) - (ProtectBaseAddress
& ~(SIZE_2MB
- 1));
494 if (High4KBPageSize
!= 0) {
496 // Add not 2MB-aligned range to be mapped by 4KB-page.
498 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectEndAddress
& ~(SIZE_2MB
- 1);
499 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
500 NumberOfSpliteRange
++;
502 if (Low4KBPageSize
!= 0) {
504 // Add not 2MB-aligned range to be mapped by 4KB-page.
506 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
507 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectBaseAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
508 NumberOfSpliteRange
++;
512 // The range could only be mapped by 4KB-page.
514 mSplitMemRange
[NumberOfSpliteRange
].Base
= ProtectBaseAddress
& ~(SIZE_2MB
- 1);
515 mSplitMemRange
[NumberOfSpliteRange
].Top
= (ProtectEndAddress
+ SIZE_2MB
- 1) & ~(SIZE_2MB
- 1);
516 NumberOfSpliteRange
++;
521 mSplitMemRangeCount
= NumberOfSpliteRange
;
523 DEBUG ((EFI_D_INFO
, "SMM Profile Memory Ranges:\n"));
524 for (Index
= 0; Index
< mProtectionMemRangeCount
; Index
++) {
525 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Base = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Base
));
526 DEBUG ((EFI_D_INFO
, "mProtectionMemRange[%d].Top = %lx\n", Index
, mProtectionMemRange
[Index
].Range
.Top
));
528 for (Index
= 0; Index
< mSplitMemRangeCount
; Index
++) {
529 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Base = %lx\n", Index
, mSplitMemRange
[Index
].Base
));
530 DEBUG ((EFI_D_INFO
, "mSplitMemRange[%d].Top = %lx\n", Index
, mSplitMemRange
[Index
].Top
));
535 Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
552 UINTN NumberOfPdpEntries
;
553 UINTN NumberOfPml4Entries
;
554 UINTN SizeOfMemorySpace
;
557 if (sizeof (UINTN
) == sizeof (UINT64
)) {
558 Pml4
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
559 SizeOfMemorySpace
= HighBitSet64 (gPhyMask
) + 1;
561 // Calculate the table entries of PML4E and PDPTE.
563 if (SizeOfMemorySpace
<= 39 ) {
564 NumberOfPml4Entries
= 1;
565 NumberOfPdpEntries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 30));
567 NumberOfPml4Entries
= (UINT32
)LShiftU64 (1, (SizeOfMemorySpace
- 39));
568 NumberOfPdpEntries
= 512;
571 NumberOfPml4Entries
= 1;
572 NumberOfPdpEntries
= 4;
576 // Go through page table and change 2MB-page into 4KB-page.
578 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
579 if (sizeof (UINTN
) == sizeof (UINT64
)) {
580 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
582 // If Pml4 entry does not exist, skip it
586 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
588 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
590 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
591 if ((*Pde
& IA32_PG_P
) == 0) {
593 // If PDE entry does not exist, skip it
597 if ((*Pde
& IA32_PG_PS
) != 0) {
599 // This is 1G entry, skip it
603 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
607 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
608 if ((*Pte
& IA32_PG_P
) == 0) {
610 // If PTE entry does not exist, skip it
614 Address
= (((Level2
<< 9) + Level3
) << 21);
617 // If it is 2M page, check IsAddressSplit()
619 if (((*Pte
& IA32_PG_PS
) != 0) && IsAddressSplit (Address
)) {
621 // Based on current page table, create 4KB page table for split area.
623 ASSERT (Address
== (*Pte
& PHYSICAL_ADDRESS_MASK
));
625 Pt
= AllocatePageTableMemory (1);
629 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++) {
630 Pt
[Level4
] = Address
+ ((Level4
<< 12) | mAddressEncMask
| PAGE_ATTRIBUTE_BITS
);
632 *Pte
= (UINT64
)(UINTN
)Pt
| mAddressEncMask
| PAGE_ATTRIBUTE_BITS
;
633 } // end if IsAddressSplit
639 // Go through page table and set several page table entries to absent or execute-disable.
641 DEBUG ((EFI_D_INFO
, "Patch page table start ...\n"));
642 for (Level1
= 0; Level1
< NumberOfPml4Entries
; Level1
++) {
643 if (sizeof (UINTN
) == sizeof (UINT64
)) {
644 if ((Pml4
[Level1
] & IA32_PG_P
) == 0) {
646 // If Pml4 entry does not exist, skip it
650 Pde
= (UINT64
*)(UINTN
)(Pml4
[Level1
] & ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
652 Pde
= (UINT64
*)(UINTN
)mSmmProfileCr3
;
654 for (Level2
= 0; Level2
< NumberOfPdpEntries
; Level2
++, Pde
++) {
655 if ((*Pde
& IA32_PG_P
) == 0) {
657 // If PDE entry does not exist, skip it
661 if ((*Pde
& IA32_PG_PS
) != 0) {
663 // This is 1G entry, set NX bit and skip it
666 *Pde
= *Pde
| IA32_PG_NX
;
670 Pte
= (UINT64
*)(UINTN
)(*Pde
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
674 for (Level3
= 0; Level3
< SIZE_4KB
/ sizeof (*Pte
); Level3
++, Pte
++) {
675 if ((*Pte
& IA32_PG_P
) == 0) {
677 // If PTE entry does not exist, skip it
681 Address
= (((Level2
<< 9) + Level3
) << 21);
683 if ((*Pte
& IA32_PG_PS
) != 0) {
686 if (!IsAddressValid (Address
, &Nx
)) {
688 // Patch to remove Present flag and RW flag
690 *Pte
= *Pte
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
692 if (Nx
&& mXdSupported
) {
693 *Pte
= *Pte
| IA32_PG_NX
;
697 Pt
= (UINT64
*)(UINTN
)(*Pte
& ~mAddressEncMask
& PHYSICAL_ADDRESS_MASK
);
701 for (Level4
= 0; Level4
< SIZE_4KB
/ sizeof(*Pt
); Level4
++, Pt
++) {
702 if (!IsAddressValid (Address
, &Nx
)) {
703 *Pt
= *Pt
& (INTN
)(INT32
)(~PAGE_ATTRIBUTE_BITS
);
705 if (Nx
&& mXdSupported
) {
706 *Pt
= *Pt
| IA32_PG_NX
;
719 DEBUG ((EFI_D_INFO
, "Patch page table done!\n"));
721 // Set execute-disable flag
729 To get system port address of the SMI Command Port in FADT table.
737 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*Fadt
;
739 Fadt
= (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE
*) EfiLocateFirstAcpiTable (
740 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
742 ASSERT (Fadt
!= NULL
);
744 mSmiCommandPort
= Fadt
->SmiCmd
;
745 DEBUG ((EFI_D_INFO
, "mSmiCommandPort = %x\n", mSmiCommandPort
));
749 Updates page table to make some memory ranges (like system memory) absent
750 and make some memory ranges (like MMIO) present and execute disable. It also
751 update 2MB-page to 4KB-page for some memory ranges.
760 // The flag indicates SMM profile starts to work.
762 mSmmProfileStart
= TRUE
;
766 Initialize SMM profile in SmmReadyToLock protocol callback function.
768 @param Protocol Points to the protocol's unique identifier.
769 @param Interface Points to the interface instance.
770 @param Handle The handle on which the interface was installed.
772 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
776 InitSmmProfileCallBack (
777 IN CONST EFI_GUID
*Protocol
,
783 // Save to variable so that SMM profile data can be found.
788 EFI_VARIABLE_BOOTSERVICE_ACCESS
| EFI_VARIABLE_RUNTIME_ACCESS
,
789 sizeof(mSmmProfileBase
),
794 // Get Software SMI from FADT
796 GetSmiCommandPort ();
799 // Initialize protected memory range for patching page table later.
801 InitProtectedMemRange ();
807 Initialize SMM profile data structures.
811 InitSmmProfileInternal (
816 EFI_PHYSICAL_ADDRESS Base
;
819 UINTN MsrDsAreaSizePerCpu
;
822 mPFEntryCount
= (UINTN
*)AllocateZeroPool (sizeof (UINTN
) * mMaxNumberOfCpus
);
823 ASSERT (mPFEntryCount
!= NULL
);
824 mLastPFEntryValue
= (UINT64 (*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
825 sizeof (mLastPFEntryValue
[0]) * mMaxNumberOfCpus
);
826 ASSERT (mLastPFEntryValue
!= NULL
);
827 mLastPFEntryPointer
= (UINT64
*(*)[MAX_PF_ENTRY_COUNT
])AllocateZeroPool (
828 sizeof (mLastPFEntryPointer
[0]) * mMaxNumberOfCpus
);
829 ASSERT (mLastPFEntryPointer
!= NULL
);
832 // Allocate memory for SmmProfile below 4GB.
835 mSmmProfileSize
= PcdGet32 (PcdCpuSmmProfileSize
);
836 ASSERT ((mSmmProfileSize
& 0xFFF) == 0);
839 TotalSize
= mSmmProfileSize
+ mMsrDsAreaSize
;
841 TotalSize
= mSmmProfileSize
;
845 Status
= gBS
->AllocatePages (
847 EfiReservedMemoryType
,
848 EFI_SIZE_TO_PAGES (TotalSize
),
851 ASSERT_EFI_ERROR (Status
);
852 ZeroMem ((VOID
*)(UINTN
)Base
, TotalSize
);
853 mSmmProfileBase
= (SMM_PROFILE_HEADER
*)(UINTN
)Base
;
856 // Initialize SMM profile data header.
858 mSmmProfileBase
->HeaderSize
= sizeof (SMM_PROFILE_HEADER
);
859 mSmmProfileBase
->MaxDataEntries
= (UINT64
)((mSmmProfileSize
- sizeof(SMM_PROFILE_HEADER
)) / sizeof (SMM_PROFILE_ENTRY
));
860 mSmmProfileBase
->MaxDataSize
= MultU64x64 (mSmmProfileBase
->MaxDataEntries
, sizeof(SMM_PROFILE_ENTRY
));
861 mSmmProfileBase
->CurDataEntries
= 0;
862 mSmmProfileBase
->CurDataSize
= 0;
863 mSmmProfileBase
->TsegStart
= mCpuHotPlugData
.SmrrBase
;
864 mSmmProfileBase
->TsegSize
= mCpuHotPlugData
.SmrrSize
;
865 mSmmProfileBase
->NumSmis
= 0;
866 mSmmProfileBase
->NumCpus
= gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
869 mMsrDsArea
= (MSR_DS_AREA_STRUCT
**)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT
*) * mMaxNumberOfCpus
);
870 ASSERT (mMsrDsArea
!= NULL
);
871 mMsrBTSRecord
= (BRANCH_TRACE_RECORD
**)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD
*) * mMaxNumberOfCpus
);
872 ASSERT (mMsrBTSRecord
!= NULL
);
873 mMsrPEBSRecord
= (PEBS_RECORD
**)AllocateZeroPool (sizeof (PEBS_RECORD
*) * mMaxNumberOfCpus
);
874 ASSERT (mMsrPEBSRecord
!= NULL
);
876 mMsrDsAreaBase
= (MSR_DS_AREA_STRUCT
*)((UINTN
)Base
+ mSmmProfileSize
);
877 MsrDsAreaSizePerCpu
= mMsrDsAreaSize
/ mMaxNumberOfCpus
;
878 mBTSRecordNumber
= (MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
- sizeof(MSR_DS_AREA_STRUCT
)) / sizeof(BRANCH_TRACE_RECORD
);
879 for (Index
= 0; Index
< mMaxNumberOfCpus
; Index
++) {
880 mMsrDsArea
[Index
] = (MSR_DS_AREA_STRUCT
*)((UINTN
)mMsrDsAreaBase
+ MsrDsAreaSizePerCpu
* Index
);
881 mMsrBTSRecord
[Index
] = (BRANCH_TRACE_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + sizeof(MSR_DS_AREA_STRUCT
));
882 mMsrPEBSRecord
[Index
] = (PEBS_RECORD
*)((UINTN
)mMsrDsArea
[Index
] + MsrDsAreaSizePerCpu
- sizeof(PEBS_RECORD
) * PEBS_RECORD_NUMBER
);
884 mMsrDsArea
[Index
]->BTSBufferBase
= (UINTN
)mMsrBTSRecord
[Index
];
885 mMsrDsArea
[Index
]->BTSIndex
= mMsrDsArea
[Index
]->BTSBufferBase
;
886 mMsrDsArea
[Index
]->BTSAbsoluteMaximum
= mMsrDsArea
[Index
]->BTSBufferBase
+ mBTSRecordNumber
* sizeof(BRANCH_TRACE_RECORD
) + 1;
887 mMsrDsArea
[Index
]->BTSInterruptThreshold
= mMsrDsArea
[Index
]->BTSAbsoluteMaximum
+ 1;
889 mMsrDsArea
[Index
]->PEBSBufferBase
= (UINTN
)mMsrPEBSRecord
[Index
];
890 mMsrDsArea
[Index
]->PEBSIndex
= mMsrDsArea
[Index
]->PEBSBufferBase
;
891 mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
= mMsrDsArea
[Index
]->PEBSBufferBase
+ PEBS_RECORD_NUMBER
* sizeof(PEBS_RECORD
) + 1;
892 mMsrDsArea
[Index
]->PEBSInterruptThreshold
= mMsrDsArea
[Index
]->PEBSAbsoluteMaximum
+ 1;
896 mProtectionMemRange
= mProtectionMemRangeTemplate
;
897 mProtectionMemRangeCount
= sizeof (mProtectionMemRangeTemplate
) / sizeof (MEMORY_PROTECTION_RANGE
);
900 // Update TSeg entry.
902 mProtectionMemRange
[0].Range
.Base
= mCpuHotPlugData
.SmrrBase
;
903 mProtectionMemRange
[0].Range
.Top
= mCpuHotPlugData
.SmrrBase
+ mCpuHotPlugData
.SmrrSize
;
906 // Update SMM profile entry.
908 mProtectionMemRange
[1].Range
.Base
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
;
909 mProtectionMemRange
[1].Range
.Top
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)mSmmProfileBase
+ TotalSize
;
912 // Allocate memory reserved for creating 4KB pages.
914 InitPagesForPFHandler ();
917 // Start SMM profile when SmmReadyToLock protocol is installed.
919 Status
= gSmst
->SmmRegisterProtocolNotify (
920 &gEfiSmmReadyToLockProtocolGuid
,
921 InitSmmProfileCallBack
,
924 ASSERT_EFI_ERROR (Status
);
930 Check if XD feature is supported by a processor.
934 CheckFeatureSupported (
940 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr
;
943 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
944 if (RegEax
<= CPUID_EXTENDED_FUNCTION
) {
946 // Extended CPUID functions are not supported on this processor.
948 mXdSupported
= FALSE
;
949 PatchInstructionX86 (gPatchXdSupported
, mXdSupported
, 1);
952 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
953 if ((RegEdx
& CPUID1_EDX_XD_SUPPORT
) == 0) {
955 // Execute Disable Bit feature is not supported on this processor.
957 mXdSupported
= FALSE
;
958 PatchInstructionX86 (gPatchXdSupported
, mXdSupported
, 1);
963 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
);
964 if ((RegEdx
& CPUID1_EDX_BTS_AVAILABLE
) != 0) {
967 // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
968 // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
969 // availability of the BTS facilities, including the ability to set the BTS and
970 // BTINT bits in the MSR_DEBUGCTLA MSR.
971 // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
973 MiscEnableMsr
.Uint64
= AsmReadMsr64 (MSR_IA32_MISC_ENABLE
);
974 if (MiscEnableMsr
.Bits
.BTS
== 1) {
976 // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
978 mBtsSupported
= FALSE
;
989 ActivateSingleStepDB (
996 if ((Dr6
& DR6_SINGLE_STEP
) != 0) {
999 Dr6
|= DR6_SINGLE_STEP
;
1014 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1015 if ((DebugCtl
& MSR_DEBUG_CTL_LBR
) != 0) {
1018 DebugCtl
|= MSR_DEBUG_CTL_LBR
;
1019 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1023 Enable branch trace store.
1025 @param CpuIndex The index of the processor.
1035 DebugCtl
= AsmReadMsr64 (MSR_DEBUG_CTL
);
1036 if ((DebugCtl
& MSR_DEBUG_CTL_BTS
) != 0) {
1040 AsmWriteMsr64 (MSR_DS_AREA
, (UINT64
)(UINTN
)mMsrDsArea
[CpuIndex
]);
1041 DebugCtl
|= (UINT64
)(MSR_DEBUG_CTL_BTS
| MSR_DEBUG_CTL_TR
);
1042 DebugCtl
&= ~((UINT64
)MSR_DEBUG_CTL_BTINT
);
1043 AsmWriteMsr64 (MSR_DEBUG_CTL
, DebugCtl
);
1047 Increase SMI number in each SMI entry.
1051 SmmProfileRecordSmiNum (
1055 if (mSmmProfileStart
) {
1056 mSmmProfileBase
->NumSmis
++;
1061 Initialize processor environment for SMM profile.
1063 @param CpuIndex The index of the processor.
1067 ActivateSmmProfile (
1072 // Enable Single Step DB#
1074 ActivateSingleStepDB ();
1076 if (mBtsSupported
) {
1078 // We can not get useful information from LER, so we have to use BTS.
1085 ActivateBTS (CpuIndex
);
1090 Initialize SMM profile in SMM CPU entry point.
1092 @param[in] Cr3 The base address of the page tables to use in SMM.
1103 mSmmProfileCr3
= Cr3
;
1106 // Skip SMM profile initialization if feature is disabled
1108 if (!FeaturePcdGet (PcdCpuSmmProfileEnable
) &&
1109 !HEAP_GUARD_NONSTOP_MODE
&&
1110 !NULL_DETECTION_NONSTOP_MODE
) {
1115 // Initialize SmmProfile here
1117 InitSmmProfileInternal ();
1120 // Initialize profile IDT.
1125 // Tell #PF handler to prepare a #DB subsequently.
1127 mSetupDebugTrap
= TRUE
;
1131 Update page table to map the memory correctly in order to make the instruction
1132 which caused page fault execute successfully. And it also save the original page
1133 table to be restored in single-step exception.
1135 @param PageTable PageTable Address.
1136 @param PFAddress The memory address which caused page fault exception.
1137 @param CpuIndex The index of the processor.
1138 @param ErrorCode The Error code of exception.
1142 RestorePageTableBelow4G (
1155 if (sizeof(UINT64
) == sizeof(UINTN
)) {
1156 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 39, 47);
1157 ASSERT (PageTable
[PTIndex
] != 0);
1158 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1164 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 30, 38);
1165 ASSERT (PageTable
[PTIndex
] != 0);
1166 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1171 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 21, 29);
1172 if ((PageTable
[PTIndex
] & IA32_PG_PS
) != 0) {
1178 // Record old entries with non-present status
1179 // Old entries include the memory which instruction is at and the memory which instruction access.
1182 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1183 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1184 PFIndex
= mPFEntryCount
[CpuIndex
];
1185 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1186 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1187 mPFEntryCount
[CpuIndex
]++;
1193 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 21) - 1));
1194 PageTable
[PTIndex
] |= (UINT64
)IA32_PG_PS
;
1195 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1196 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1197 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1203 ASSERT (PageTable
[PTIndex
] != 0);
1204 PageTable
= (UINT64
*)(UINTN
)(PageTable
[PTIndex
] & PHYSICAL_ADDRESS_MASK
);
1209 PTIndex
= (UINTN
)BitFieldRead64 (PFAddress
, 12, 20);
1212 // Record old entries with non-present status
1213 // Old entries include the memory which instruction is at and the memory which instruction access.
1216 ASSERT (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
);
1217 if (mPFEntryCount
[CpuIndex
] < MAX_PF_ENTRY_COUNT
) {
1218 PFIndex
= mPFEntryCount
[CpuIndex
];
1219 mLastPFEntryValue
[CpuIndex
][PFIndex
] = PageTable
[PTIndex
];
1220 mLastPFEntryPointer
[CpuIndex
][PFIndex
] = &PageTable
[PTIndex
];
1221 mPFEntryCount
[CpuIndex
]++;
1227 PageTable
[PTIndex
] = (PFAddress
& ~((1ull << 12) - 1));
1228 PageTable
[PTIndex
] |= (UINT64
)PAGE_ATTRIBUTE_BITS
;
1229 if ((ErrorCode
& IA32_PF_EC_ID
) != 0) {
1230 PageTable
[PTIndex
] &= ~IA32_PG_NX
;
1236 Handler for Page Fault triggered by Guard page.
1238 @param ErrorCode The Error code of exception.
1242 GuardPagePFHandler (
1248 UINT64 RestoreAddress
;
1249 UINTN RestorePageNumber
;
1252 PageTable
= (UINT64
*)AsmReadCr3 ();
1253 PFAddress
= AsmReadCr2 ();
1254 CpuIndex
= GetCpuIndex ();
1257 // Memory operation cross pages, like "rep mov" instruction, will cause
1258 // infinite loop between this and Debug Trap handler. We have to make sure
1259 // that current page and the page followed are both in PRESENT state.
1261 RestorePageNumber
= 2;
1262 RestoreAddress
= PFAddress
;
1263 while (RestorePageNumber
> 0) {
1264 RestorePageTableBelow4G (PageTable
, RestoreAddress
, CpuIndex
, ErrorCode
);
1265 RestoreAddress
+= EFI_PAGE_SIZE
;
1266 RestorePageNumber
--;
1276 The Page fault handler to save SMM profile data.
1278 @param Rip The RIP when exception happens.
1279 @param ErrorCode The Error code of exception.
1283 SmmProfilePFHandler (
1290 UINT64 RestoreAddress
;
1291 UINTN RestorePageNumber
;
1294 UINT64 InstructionAddress
;
1295 UINTN MaxEntryNumber
;
1296 UINTN CurrentEntryNumber
;
1297 BOOLEAN IsValidPFAddress
;
1298 SMM_PROFILE_ENTRY
*SmmProfileEntry
;
1302 EFI_SMM_SAVE_STATE_IO_INFO IoInfo
;
1304 if (!mSmmProfileStart
) {
1306 // If SMM profile does not start, call original page fault handler.
1308 SmiDefaultPFHandler ();
1312 if (mBtsSupported
) {
1316 IsValidPFAddress
= FALSE
;
1317 PageTable
= (UINT64
*)AsmReadCr3 ();
1318 PFAddress
= AsmReadCr2 ();
1319 CpuIndex
= GetCpuIndex ();
1322 // Memory operation cross pages, like "rep mov" instruction, will cause
1323 // infinite loop between this and Debug Trap handler. We have to make sure
1324 // that current page and the page followed are both in PRESENT state.
1326 RestorePageNumber
= 2;
1327 RestoreAddress
= PFAddress
;
1328 while (RestorePageNumber
> 0) {
1329 if (RestoreAddress
<= 0xFFFFFFFF) {
1330 RestorePageTableBelow4G (PageTable
, RestoreAddress
, CpuIndex
, ErrorCode
);
1332 RestorePageTableAbove4G (PageTable
, RestoreAddress
, CpuIndex
, ErrorCode
, &IsValidPFAddress
);
1334 RestoreAddress
+= EFI_PAGE_SIZE
;
1335 RestorePageNumber
--;
1338 if (!IsValidPFAddress
) {
1339 InstructionAddress
= Rip
;
1340 if ((ErrorCode
& IA32_PF_EC_ID
) != 0 && (mBtsSupported
)) {
1342 // If it is instruction fetch failure, get the correct IP from BTS.
1344 InstructionAddress
= GetSourceFromDestinationOnBts (CpuIndex
, Rip
);
1345 if (InstructionAddress
== 0) {
1347 // It indicates the instruction which caused page fault is not a jump instruction,
1348 // set instruction address same as the page fault address.
1350 InstructionAddress
= PFAddress
;
1355 // Indicate it is not software SMI
1357 SmiCommand
= 0xFFFFFFFFFFFFFFFFULL
;
1358 for (Index
= 0; Index
< gSmst
->NumberOfCpus
; Index
++) {
1359 Status
= SmmReadSaveState(&mSmmCpu
, sizeof(IoInfo
), EFI_SMM_SAVE_STATE_REGISTER_IO
, Index
, &IoInfo
);
1360 if (EFI_ERROR (Status
)) {
1363 if (IoInfo
.IoPort
== mSmiCommandPort
) {
1365 // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1367 SoftSmiValue
= IoRead8 (mSmiCommandPort
);
1368 SmiCommand
= (UINT64
)SoftSmiValue
;
1373 SmmProfileEntry
= (SMM_PROFILE_ENTRY
*)(UINTN
)(mSmmProfileBase
+ 1);
1375 // Check if there is already a same entry in profile data.
1377 for (Index
= 0; Index
< (UINTN
) mSmmProfileBase
->CurDataEntries
; Index
++) {
1378 if ((SmmProfileEntry
[Index
].ErrorCode
== (UINT64
)ErrorCode
) &&
1379 (SmmProfileEntry
[Index
].Address
== PFAddress
) &&
1380 (SmmProfileEntry
[Index
].CpuNum
== (UINT64
)CpuIndex
) &&
1381 (SmmProfileEntry
[Index
].Instruction
== InstructionAddress
) &&
1382 (SmmProfileEntry
[Index
].SmiCmd
== SmiCommand
)) {
1384 // Same record exist, need not save again.
1389 if (Index
== mSmmProfileBase
->CurDataEntries
) {
1390 CurrentEntryNumber
= (UINTN
) mSmmProfileBase
->CurDataEntries
;
1391 MaxEntryNumber
= (UINTN
) mSmmProfileBase
->MaxDataEntries
;
1392 if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer
)) {
1393 CurrentEntryNumber
= CurrentEntryNumber
% MaxEntryNumber
;
1395 if (CurrentEntryNumber
< MaxEntryNumber
) {
1397 // Log the new entry
1399 SmmProfileEntry
[CurrentEntryNumber
].SmiNum
= mSmmProfileBase
->NumSmis
;
1400 SmmProfileEntry
[CurrentEntryNumber
].ErrorCode
= (UINT64
)ErrorCode
;
1401 SmmProfileEntry
[CurrentEntryNumber
].ApicId
= (UINT64
)GetApicId ();
1402 SmmProfileEntry
[CurrentEntryNumber
].CpuNum
= (UINT64
)CpuIndex
;
1403 SmmProfileEntry
[CurrentEntryNumber
].Address
= PFAddress
;
1404 SmmProfileEntry
[CurrentEntryNumber
].Instruction
= InstructionAddress
;
1405 SmmProfileEntry
[CurrentEntryNumber
].SmiCmd
= SmiCommand
;
1407 // Update current entry index and data size in the header.
1409 mSmmProfileBase
->CurDataEntries
++;
1410 mSmmProfileBase
->CurDataSize
= MultU64x64 (mSmmProfileBase
->CurDataEntries
, sizeof (SMM_PROFILE_ENTRY
));
1419 if (mBtsSupported
) {
1425 Replace INT1 exception handler to restore page table to absent/execute-disable state
1426 in order to trigger page fault again to save SMM profile data..
1436 Status
= SmmRegisterExceptionHandler (&mSmmCpuService
, EXCEPT_IA32_DEBUG
, DebugExceptionHandler
);
1437 ASSERT_EFI_ERROR (Status
);