#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
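/*
 * The nhm format extends the core format with a status bitmask that
 * identifies which counters a record belongs to -- drain_pebs_nhm()
 * below walks it with for_each_set_bit() -- plus data linear address,
 * data source and latency fields used for load-latency profiling.
 */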
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
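/*
 * The hardware treats this structure as its view of the DS save area:
 * it appends records at *_index and, once *_index crosses
 * *_interrupt_threshold, raises a PMI so software can drain the
 * buffer before it fills up to *_absolute_maximum.
 */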
static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	/* MSR_IA32_DS_AREA takes the 64bit linear address as two u32 halves */
	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
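/*
 * Worked example of the PEBS sizing below, assuming 4K pages:
 * PEBS_BUFFER_SIZE / sizeof(struct pebs_record_core) = 4096 / 144
 * = 28 records per CPU (23 for the larger nhm format). Since the
 * interrupt threshold sits one record into the buffer, the extra
 * capacity only matters when the PMI is delayed.
 */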
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
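/*
 * BTS sizing, assuming 4K pages: BTS_BUFFER_SIZE / BTS_RECORD_SIZE
 * = 65536 / 24 = 2730 records, with the interrupt threshold backed
 * off max / 16 = 170 records from the end so the PMI handler has
 * headroom to drain before the hardware hits bts_absolute_maximum.
 */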
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}
static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	kfree(ds);
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}
static void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
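/*
 * BTS and PEBS fail independently below: losing one buffer type only
 * keeps the corresponding *_active flag clear, and the shared DS area
 * itself is torn down only when both allocations have failed.
 */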
static void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}
/*
 * BTS
 */

static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
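/*
 * BTS is driven through MSR_IA32_DEBUGCTLMSR: TR enables branch trace
 * messages, BTS steers them into the DS buffer instead of onto the
 * bus, and BTINT arms the interrupt-on-threshold. The BTS_OFF_OS and
 * BTS_OFF_USR bits suppress tracing per privilege level, matching the
 * event's OS/USR filter bits below.
 */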
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}
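/*
 * PEBS
 *
 * PEBS_EVENT_CONSTRAINT() pairs an event code with the mask of
 * counters allowed to carry it: Core-era parts can only take PEBS on
 * PMC0 (0x1), while Nehalem allows any of PMC0-3 (0xf).
 */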
static struct event_constraint intel_core_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}
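/*
 * With PEBS the counter does not raise a PMI of its own: clearing
 * ARCH_PERFMON_EVENTSEL_INT below lets it overflow silently, and the
 * interrupt comes from the PEBS assist once it has written a record.
 */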
static void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}
static void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}
static void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
static void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
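/*
 * With a trap-like PEBS assist the reported IP is that of the
 * instruction *after* the one that triggered the event. To recover
 * the exact IP we walk the basic block: take the branch target from
 * LBR entry 0, decode instructions forward until we reach the
 * reported IP, and report the start of the last instruction decoded.
 */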
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

		kernel_insn_init(&insn, kaddr);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, 1, &data, &regs))
		x86_pmu_stop(event, 0);
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}
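/*
 * pebs_format and pebs_trap above come from the IA32_PERF_CAPABILITIES
 * MSR: fmt0 (Core) writes pebs_record_core records and is tied to
 * PMC0, fmt1 (Nehalem) writes the larger pebs_record_nhm records on
 * any of the first four counters.
 */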
/*
 * BTS, PEBS probe and setup
 */

static void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			x86_pmu.pebs_constraints = intel_core_pebs_events;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
			break;
		}
	}
}
#else /* CONFIG_CPU_SUP_INTEL */

static void reserve_ds_buffers(void)
{
}

static void release_ds_buffers(void)
{
}

#endif /* CONFIG_CPU_SUP_INTEL */