#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};
static const enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]	= LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2]	= LBR_EIP_FLAGS | LBR_TSX,
};
/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */
/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)
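/*
 * Worked example (illustrative values): because LBR_NO_INFO lives in
 * bit 63 and the valid MSR bits only cover bits 0-9, masking a user
 * config with x86_pmu.lbr_sel_mask in the enable path below strips the
 * software-only bit before the value reaches MSR_LBR_SELECT:
 *
 *	config     = LBR_NO_INFO | LBR_USER        = 0x8000000000000002
 *	lbr_select = config & LBR_SEL_MASK (0x3ff) = 0x0000000000000002
 */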
#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)
#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
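/*
 * Sketch of the MSR_LAST_BRANCH_FROM_x layout implied by the flags
 * above for the TSX-aware formats (a reading aid, not an authoritative
 * datasheet excerpt):
 *
 *	bit 63     : branch was mispredicted
 *	bit 62     : branch executed inside a TSX transaction
 *	bit 61     : branch caused a TSX abort
 *	bits 60:0  : sign-extended source address (bits 60:59 are always
 *	             part of the sign extension, hence LBR_FROM_SIGNEXT_2MSB)
 */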
/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */

	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY		 \
	(X86_BR_CALL		|\
	 X86_BR_RET		|\
	 X86_BR_SYSCALL		|\
	 X86_BR_SYSRET		|\
	 X86_BR_INT		|\
	 X86_BR_IRET		|\
	 X86_BR_JCC		|\
	 X86_BR_JMP		|\
	 X86_BR_IRQ		|\
	 X86_BR_ABORT		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_IND_JMP		|\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}
void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}
/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}
/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}
DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
		 * in val are always OFF and must be changed to be sign
		 * extension bits. Since bits 59:60 are guaranteed to be
		 * part of the sign extension bits, we can just copy them
		 * to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
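/*
 * Worked example for the quirk above (illustrative value): a HW-captured
 * kernel address arrives with the TSX bits 62:61 forced to zero, e.g.
 *
 *	val                         = 0x9800000000001234
 *	LBR_FROM_SIGNEXT_2MSB & val = 0x1800000000000000  (bits 60:59)
 *	(...) << 2                  = 0x6000000000000000  (bits 62:61)
 *	val after |=                = 0xf800000000001234
 *
 * i.e. bits 60:59 are replicated into 62:61, restoring a canonical
 * 63-bit sign extension before the wrmsr().
 */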
/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}
static inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}
static inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}
static inline u64 rdlbr_from(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}
static inline u64 rdlbr_to(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}
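/*
 * Worked example of the ring-buffer indexing used here and in
 * __intel_pmu_lbr_save() below (illustrative numbers): with
 * x86_pmu.lbr_nr = 16, mask = 15. If tos = 3, the loop touches
 * lbr_idx = 3, 2, 1 for i = 0, 1, 2, i.e. it walks backwards from the
 * most recent entry, wrapping modulo the stack depth via "& mask".
 */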
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!cpuc->lbr_users)
		return;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}
static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}
void intel_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();
}
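/*
 * Example of the heuristic above (hypothetical scenario): an event
 * attached via perf_install_in_context() to an already-running task
 * arrives here with total_time_running == 0, so the first LBR user
 * wipes any stale LBR state. An event coming back through the context
 * switch path after having run has total_time_running > 0 and skips
 * the reset.
 */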
void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}
void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}
void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].type	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}
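/*
 * On these 32-bit-format cores a single LBR MSR packs both addresses;
 * the union above splits it apart. Worked example (illustrative value,
 * little-endian x86 layout assumed):
 *
 *	msr_lastbranch.lbr  = 0xc1000000004010f0
 *	msr_lastbranch.from = 0x004010f0  (low  32 bits)
 *	msr_lastbranch.to   = 0xc1000000  (high 32 bits)
 */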
/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx);
		to   = rdlbr_to(lbr_idx);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].type	 = 0;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}
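/*
 * Worked example of the LBR_FORMAT_TIME decode above (illustrative
 * value): for a raw TO value of 0x004cffffff123456,
 *
 *	cycles = (to >> 48) & LBR_INFO_CYCLES = 0x004c
 *	to     = ((s64)to << 16) >> 16        = 0xffffffffff123456
 *
 * i.e. the cycle count rides in bits 63:48 and the remaining 48 bits
 * are sign-extended back into a canonical address.
 */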
void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}
/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
		mask |= X86_BR_TYPE_SAVE;

	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;

	return 0;
}
/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}
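/*
 * Worked example of the suppress-mode XOR above (illustrative request):
 * for PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_COND the map
 * yields mask = LBR_KERNEL | LBR_JCC = 0x005, so
 *
 *	reg->config = 0x005 ^ (0x3ff & ~0x200) = 0x005 ^ 0x1ff = 0x1fa
 *
 * Bits 0 (ring0) and 2 (Jcc) end up clear, i.e. those classes are NOT
 * suppressed, while every other filterable class is; bit 9
 * (LBR_CALL_STACK) passes through unflipped since it is enable-mode.
 */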
int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}
/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
		/* fall through: a non-zero displacement is a regular call */
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
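/*
 * Worked decode example (hypothetical bytes): if the instruction at
 * "from" is e8 00 00 00 00 (call near rel32 with a zero displacement),
 * insn_get_immediate() sees immediate1.value == 0 and the branch is
 * classified as X86_BR_ZERO_CALL (the call-next-instruction idiom used
 * to read the instruction pointer) rather than a regular X86_BR_CALL.
 */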
#define X86_BR_TYPE_MAP_MAX	16

static int branch_map[X86_BR_TYPE_MAP_MAX] = {
	PERF_BR_CALL,		/* X86_BR_CALL */
	PERF_BR_RET,		/* X86_BR_RET */
	PERF_BR_SYSCALL,	/* X86_BR_SYSCALL */
	PERF_BR_SYSRET,		/* X86_BR_SYSRET */
	PERF_BR_UNKNOWN,	/* X86_BR_INT */
	PERF_BR_UNKNOWN,	/* X86_BR_IRET */
	PERF_BR_COND,		/* X86_BR_JCC */
	PERF_BR_UNCOND,		/* X86_BR_JMP */
	PERF_BR_UNKNOWN,	/* X86_BR_IRQ */
	PERF_BR_IND_CALL,	/* X86_BR_IND_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_ABORT */
	PERF_BR_UNKNOWN,	/* X86_BR_IN_TX */
	PERF_BR_UNKNOWN,	/* X86_BR_NO_TX */
	PERF_BR_CALL,		/* X86_BR_ZERO_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_CALL_STACK */
	PERF_BR_IND,		/* X86_BR_IND_JMP */
};
static int common_branch_type(int type)
{
	int i;

	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

	if (type) {
		i = __ffs(type);
		if (i < X86_BR_TYPE_MAP_MAX)
			return branch_map[i];
	}

	return PERF_BR_UNKNOWN;
}
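/*
 * Worked example (illustrative value): for type = X86_BR_KERNEL |
 * X86_BR_JCC = (1 << 1) | (1 << 8), the shift drops the two priv-level
 * bits, leaving 1 << 6; __ffs() returns 6 and branch_map[6] is
 * PERF_BR_COND.
 */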
/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}

		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
			cpuc->lbr_entries[i].type = common_branch_type(type);
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}
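/*
 * Compaction example (illustrative): with entries [A, X, B] where X was
 * discarded (from = 0), the inner while-loop shifts B over X, giving
 * [A, B] and lbr_stack.nr = 2. The re-check of entries[i].from handles
 * runs of consecutive discarded records without advancing i.
 */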
/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};
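/*
 * Note on the erratum workarounds above: because the HW filter must
 * over-capture (e.g. REL_JMP/IND_JMP included to get CALLs), the SW
 * filter in intel_pmu_lbr_filter() later discards the extra jump
 * records, so the user still sees only the branch classes requested.
 */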
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};
static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};
/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}
/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}
/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}
/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}
/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}
/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}
/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}