/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				state;
};

/* BTS context states: */
enum {
	/* no ongoing AUX transactions */
	BTS_STATE_STOPPED = 0,
	/* AUX transaction is on, BTS tracing is disabled */
	BTS_STATE_INACTIVE,
	/* AUX transaction is on, BTS tracing is running */
	BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

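/*
 * A 64-bit BTS record is three 8-byte fields (branch-from, branch-to,
 * flags), hence the 24-byte record size. The safety margin keeps the
 * PMI threshold short of the end of the buffer, leaving headroom for
 * records that land while the interrupt is being delivered.
 */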
#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

struct pmu bts_pmu;

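/*
 * AUX pages come from high-order allocations: for those, the allocation
 * order is stashed in page_private(), and each high-order chunk is
 * treated as one contiguous physical buffer (struct bts_phys).
 */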
static size_t buf_size(struct page *page)
{
	return 1 << (PAGE_SHIFT + page_private(page));
}

static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

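	/*
	 * Carve the AUX area into bts_phys slices, one per high-order chunk:
	 * the displacement skips just enough bytes at the start of a chunk to
	 * keep records BTS_RECORD_SIZE-aligned across the preceding chunk's
	 * padding, and any partial record at the tail is trimmed off.
	 */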
	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}

static void bts_buffer_free_aux(void *data)
{
	kfree(data);
}

static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

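/*
 * Program the DS save area for the current physical buffer: base/index
 * aim the hardware at the right spot, absolute_maximum fences off the
 * writable range, and interrupt_threshold chooses where the PMI fires;
 * in snapshot mode the threshold is pushed past the maximum so that it
 * never fires.
 */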
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
			? ds->bts_buffer_base + thresh
			: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

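/*
 * Fold the hardware write pointer back into the AUX head and account
 * the newly written bytes; in snapshot mode data_size simply tracks the
 * current head, since the buffer is allowed to wrap.
 */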
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			perf_aux_output_flag(&bts->handle,
					     PERF_AUX_FLAG_TRUNCATED);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */

static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}

static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	event->hw.itrace_started = 1;
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}

static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}

static void bts_event_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = NULL;
	int state = READ_ONCE(bts->state);

	if (state == BTS_STATE_ACTIVE)
		__bts_event_stop(event, BTS_STATE_STOPPED);

	if (state != BTS_STATE_STOPPED)
		buf = perf_get_aux(&bts->handle);

	event->hw.state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		bts_update(bts);

		if (buf) {
			if (buf->snapshot)
				bts->handle.head =
					local_xchg(&buf->data_size,
						   buf->nr_pages << PAGE_SHIFT);

			perf_aux_output_end(&bts->handle,
					    local_xchg(&buf->data_size, 0));
		}

		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
	}
}

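/*
 * Called on the way out of the PMI handler, which brackets its body
 * with intel_bts_disable_local()/intel_bts_enable_local().
 */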
void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	int state = READ_ONCE(bts->state);

	/*
	 * Here we transition from INACTIVE to ACTIVE;
	 * if we instead are STOPPED from the interrupt handler,
	 * stay that way. Can't be ACTIVE here though.
	 */
	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
		return;

	if (state == BTS_STATE_STOPPED)
		return;

	if (bts->handle.event)
		__bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/*
	 * Here we transition from ACTIVE to INACTIVE;
	 * do nothing for STOPPED or INACTIVE.
	 */
	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
		return;

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

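/*
 * Compute the writable window [head, buf->end) for the next transaction:
 * stay in the current physical buffer while it has room, otherwise pad
 * it out, skip the gap and advance to the next one.
 */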
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				/* pad out the current buffer and skip the gap */
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;

				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

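/*
 * PMI handler: decide whether this NMI was ours by comparing the DS
 * write pointer against the interrupt threshold, then close the current
 * AUX transaction and try to start a new one.
 */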
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * handle::event is cleared
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0);
		}
	}

	return 1;
}

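/*
 * ->del() stops with PERF_EF_UPDATE so that whatever the hardware has
 * written so far is accounted and pushed out to the AUX buffer.
 */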
static void bts_event_del(struct perf_event *event, int mode)
{
	bts_event_stop(event, PERF_EF_UPDATE);
}

static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			return -EINVAL;
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 *
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 *
	 * Do this check before taking the exclusive reference so that
	 * an -EACCES return doesn't leak it.
	 */
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

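/*
 * BTS needs physically contiguous AUX chunks (PERF_PMU_CAP_AUX_NO_SG)
 * and presents itself as an instruction-trace PMU (PERF_PMU_CAP_ITRACE).
 */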
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
				  PERF_PMU_CAP_EXCLUSIVE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);