#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

static enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}
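
/*
 * Illustration (hypothetical values): the LBR MSRs form a ring buffer of
 * x86_pmu.lbr_nr entries and TOS points at the most recently written one.
 * With lbr_nr = 16 and tos = 3, walking i = 0, 1, 2, ... through
 * (tos - i) & (lbr_nr - 1) visits indices 3, 2, 1, 0, 15, 14, ..., i.e.
 * newest to oldest. This is the indexing used by the save/restore and
 * read loops below.
 */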

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
		 * in val are always OFF and must be changed to be sign
		 * extension bits. Since bits 59:60 are guaranteed to be
		 * part of the sign extension bits, we can just copy them
		 * to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
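
/*
 * Worked example (illustrative values): with TSX disabled, the hardware
 * records a kernel "from" address such as 0xffffffff81000000 with bits
 * 62:61 clear, i.e. 0x9fffffff81000000. LBR_FROM_SIGNEXT_2MSB & val keeps
 * bits 60:59 (0x1800000000000000); shifting that left by 2 sets bits 62:61
 * (0x6000000000000000), so the OR above restores the fully sign-extended
 * 0xffffffff81000000 that wrmsr() expects.
 */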

static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in) {
			__intel_pmu_lbr_restore(task_ctx);
			cpuc->lbr_context = ctx;
		} else {
			__intel_pmu_lbr_save(task_ctx);
		}
		return;
	}

	/*
	 * When sampling the branch stack in system-wide mode, it may be
	 * necessary to flush the stack on context switch. This happens
	 * when the branch stack does not tag its entries with the pid
	 * of the current task. Otherwise it becomes impossible to
	 * associate a branch entry with a task. This ambiguity is more
	 * likely to appear when the branch stack supports priv level
	 * filtering and the user sets it to monitor only at the user
	 * level (which could be a useful measurement in system-wide
	 * mode). In that case, the risk is high of having a branch
	 * stack with branches from multiple tasks.
	 */
	if (sched_in) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = ctx;
	}
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}
	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	cpuc->lbr_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);

	if (cpuc->enabled && !cpuc->lbr_users) {
		__intel_pmu_lbr_disable();
		/* avoid stale pointer */
		cpuc->lbr_context = NULL;
	}
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}
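
/*
 * Note: in the 32-bit LBR format a single MSR packs the 32-bit "from"
 * address in the low half and the 32-bit "to" address in the high half,
 * which is what the anonymous union in intel_pmu_lbr_read_32() unpacks.
 */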

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;

	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
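
	/*
	 * Worked example (illustrative values): a request for user-level
	 * conditional branches maps to mask = LBR_USER | LBR_JCC = 0x006.
	 * XOR-ing with the suppress-mode bits (0x3ff & ~LBR_CALL_STACK =
	 * 0x1ff) gives reg->config = 0x1f9: every other branch class in
	 * bits 0-8 is suppressed while the requested ones stay enabled.
	 */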

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
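		/*
		 * Example (illustrative): the indirect call "call *%rax"
		 * encodes as ff d0; ModRM byte 0xd0 has reg field
		 * (0xd0 >> 3) & 0x7 = 2, which the switch below classifies
		 * as a near indirect call.
		 */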
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
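
/*
 * Example (illustrative): a taken "jne" inside a user-space binary decodes
 * to X86_BR_JCC and, with a user-space target, is returned as
 * X86_BR_JCC | X86_BR_USER. A fault taken from user space lands in the
 * kernel and is reported as X86_BR_IRQ | X86_BR_KERNEL instead.
 */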

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}