#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
#define PEBS_FIXUP_SIZE		PAGE_SIZE
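/*
 * Sizing note (assuming 4KiB pages): the BTS buffer is 64KiB, i.e. room
 * for 64KiB / 24 = ~2730 records, while the PEBS buffer is a single page
 * that is drained after every record (see the thresh = 1 setup in
 * alloc_pebs_buffer() below).
 */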
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information.
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
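/*
 * For reference, PERF_MEM_S() (from <uapi/linux/perf_event.h>) expands to
 * (((__u64)PERF_MEM_##a##_##b) << PERF_MEM_##a##_SHIFT), so e.g.
 * P(LVL, L1) sets the PERF_MEM_LVL_L1 bit within the mem_lvl field of
 * union perf_mem_data_src.
 */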
static const u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00: unknown L3 */
	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | P(SNOOP, NONE),	/* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, NONE),	/* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, MISS),	/* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | P(SNOOP, HIT),	/* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, HITM),	/* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT),  /* 0x08: L3 miss, snoop hit */
	OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss, snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | P(SNOOP, HIT),  /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | P(SNOOP, NONE),	/* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | P(SNOOP, NONE),	/* 0x0f: uncached */
};
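/*
 * The low 4 bits of the PEBS data-source encoding index this table;
 * load_latency_data() below performs the lookup via dse.ld_dse and then
 * layers the TLB and lock information (bits 4 and 5) on top.
 */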
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB,
	 * so it either hit the walker or faulted to the OS;
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
static u64 precise_store_data_hsw(u64 status)
{
	union perf_mem_data_src dse;

	dse.val = 0;
	dse.mem_op = PERF_MEM_OP_STORE;
	dse.mem_lvl = PERF_MEM_LVL_NA;
	if (status & 1)
		dse.mem_lvl = PERF_MEM_LVL_L1;
	/* Nothing else supported. Sorry. */
	return dse.val;
}
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;
	int model = boot_cpu_data.x86_model;
	int fam = boot_cpu_data.x86;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB or lock info
	 */
	if (fam == 0x6 && (model == 26 || model == 30
	    || model == 31 || model == 46)) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
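/*
 * Worked example (illustrative): status = 0x15 has ld_dse = 0x5 and
 * ld_stlb_miss = 1, so on non-Nehalem parts this returns
 * OP_LH | P(LVL, L3) | P(SNOOP, MISS) | P(TLB, MISS) | P(TLB, L2),
 * i.e. an L3 hit with a snoop miss whose load also missed the STLB.
 */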
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};
union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort             : 1,
		    rtm_abort             : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry                 : 1,
		    data_conflict         : 1,
		    capacity_writes       : 1,
		    capacity_reads        : 1;
	};
	u64	value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
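/*
 * PEBS_HSW_TSX_FLAGS masks bits 39:32 of tsx_tuning, i.e. the abort-type
 * flag bits that sit above cycles_last_block; intel_hsw_transaction()
 * shifts them down into the generic perf transaction flags.
 */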
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static DEFINE_PER_CPU(void *, insn_buffer);
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer, *ibuffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			kfree(buffer);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}
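/*
 * With thresh = 1 the interrupt threshold is armed one record past the
 * buffer base, so the CPU raises a PMI after every PEBS record instead
 * of letting records accumulate.
 */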
static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
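/*
 * Unlike PEBS, the BTS threshold sits max/16 records short of the end of
 * the buffer: BTS streams records continuously, so the PMI fires while
 * there is still room for records that arrive before the handler drains
 * the buffer.
 */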
static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}
static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}
/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}
/*
 * PEBS
 */

struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
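/*
 * Constraint encoding note: for INTEL_UEVENT_CONSTRAINT(code, mask) the
 * low byte of 'code' is the event select and the next byte the umask
 * (0x00c0 = event 0xc0, umask 0x00); 'mask' is a bitmask of the counters
 * the event may use, so 0x1 pins PEBS to counter 0 on these parts.
 */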
struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_slm_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */
	INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */
	INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */
	INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */
	INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */
	INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */
	INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */
	INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */
	INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */
	INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */
	INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */
	INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */
	INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */
	INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */
	INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */
	INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */
	INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */
	INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */
	INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */
	INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),  /* INST_RETIRED.PRECDIST */
	INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf),  /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),     /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf),  /* BR_MISP_RETIRED.CONDITIONAL */
	INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf),  /* BR_MISP_RETIRED.ALL_BRANCHES */
	INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf),  /* BR_MISP_RETIRED.NEAR_TAKEN */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),     /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf),  /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf),  /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf),  /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf),  /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf),  /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf),  /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf),  /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
	INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf),  /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
	INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf),  /* MEM_LOAD_UOPS_RETIRED.L3_HIT */
	INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf),  /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
	INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf),  /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
	INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf),  /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
	INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf),  /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */
	INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf),  /* HLE_RETIRED.ABORTED */
	INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf),  /* RTM_RETIRED.ABORTED */
	EVENT_CONSTRAINT_END
};
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &emptyconstraint;
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;
}
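/*
 * MSR_IA32_PEBS_ENABLE layout as used here: bit 'idx' arms PEBS for
 * counter idx, bit 'idx + 32' enables load-latency capture for that
 * counter, and bit 63 enables precise stores. The MSR itself is written
 * when the PMU is (re)enabled, see intel_pmu_pebs_enable_all().
 */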
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	if (!kernel_ip(ip)) {
		int size, bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		size = ip - to; /* Must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);

		to += insn.length;
		kaddr += insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
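/*
 * In short: starting from the LBR branch target 'to', decode forward one
 * instruction at a time until the sampled IP is reached; 'old_to' then
 * points at the instruction preceding the IP, which is the one that
 * actually triggered the (trap-like) PEBS assist.
 */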
static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs)
{
	if (pebs->tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}
static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
{
	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
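/*
 * Per the Intel SDM RTM status encoding, XABORT places its 8-bit
 * immediate in bits 31:24 of EAX and sets bit 0 to flag an explicit
 * abort, which is why the abort code is pulled from
 * (pebs->ax >> 24) & 0xff and stored above PERF_TXN_ABORT_SHIFT.
 */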
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct pebs_record_hsw *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;
	u64 sample_type;
	int fll, fst;

	if (!intel_pmu_save_and_restart(event))
		return;

	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
	fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
				 PERF_X86_EVENT_PEBS_ST_HSW);

	perf_sample_data_init(&data, 0, event->hw.last_period);

	data.period = event->hw.last_period;
	sample_type = event->attr.sample_type;

	/*
	 * if PEBS-LL or PreciseStore
	 */
	if (fll || fst) {
		/*
		 * Use latency for weight (only avail with PEBS-LL)
		 */
		if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
			data.weight = pebs->lat;

		/*
		 * data.data_src encodes the data source
		 */
		if (sample_type & PERF_SAMPLE_DATA_SRC) {
			if (fll)
				data.data_src.val = load_latency_data(pebs->dse);
			else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
				data.data_src.val =
					precise_store_data_hsw(pebs->dse);
			else
				data.data_src.val = precise_store_data(pebs->dse);
		}
	}

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.flags = pebs->flags;
	set_linear_ip(&regs, pebs->ip);
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
		regs.ip = pebs->real_ip;
		regs.flags |= PERF_EFLAGS_EXACT;
	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data.addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data.weight = intel_hsw_weight(pebs);

		if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
			data.txn = intel_hsw_transaction(pebs);
	}

	if (has_branch_stack(event))
		data.br_stack = &cpuc->lbr_stack;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = NULL;
	void *at, *top;
	u64 status = 0;
	int bit;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	if (unlikely(at > top))
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(top - at > x86_pmu.max_pebs_events * x86_pmu.pebs_record_size,
		  "Unexpected number of pebs records %ld\n",
		  (long)(top - at) / x86_pmu.pebs_record_size);

	for (; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;

		for_each_set_bit(bit, (unsigned long *)&p->status,
				 x86_pmu.max_pebs_events) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= x86_pmu.max_pebs_events)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}
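/*
 * Note: p->status is a bitmask of the counters eligible for each record;
 * the local 'status' mask ensures a counter is attributed at most one
 * record per drain pass. With the threshold programmed at one, multiple
 * records per counter should not occur anyway (see the WARN_ONCE above).
 */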
/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}