/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/io.h>
#include <asm/intel_pt.h>

#include "../perf_event.h"
#include "pt.h"
static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;
/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as "caps"
 * attribute group of pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary for both trace decoding (payloads_lip contains the
 * address width encoded in IP-related packets) and event configuration
 * (bitmasks with permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)                                          \
        [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,        \
                            .reg = _r, .mask = _m }
static struct pt_cap_desc {
        const char      *name;
        u32             leaf;
        u8              reg;
        u32             mask;
} pt_caps[] = {
        PT_CAP(max_subleaf,             0, CR_EAX, 0xffffffff),
        PT_CAP(cr3_filtering,           0, CR_EBX, BIT(0)),
        PT_CAP(psb_cyc,                 0, CR_EBX, BIT(1)),
        PT_CAP(mtc,                     0, CR_EBX, BIT(3)),
        PT_CAP(topa_output,             0, CR_ECX, BIT(0)),
        PT_CAP(topa_multiple_entries,   0, CR_ECX, BIT(1)),
        PT_CAP(single_range_output,     0, CR_ECX, BIT(2)),
        PT_CAP(payloads_lip,            0, CR_ECX, BIT(31)),
        PT_CAP(mtc_periods,             1, CR_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CR_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CR_EBX, 0xffff0000),
};
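/*
 * For illustration (not part of the driver): PT_CAP(mtc, 0, CR_EBX, BIT(3))
 * above expands to an initializer equivalent to
 *
 *      [PT_CAP_mtc] = { .name = "mtc", .leaf = 0, .reg = CR_EBX,
 *                       .mask = BIT(3) },
 *
 * and once the "caps" attribute group is registered, userspace can read
 * the cached value back, e.g.:
 *
 *      $ cat /sys/bus/event_source/devices/intel_pt/caps/mtc
 */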
static u32 pt_cap_get(enum pt_capabilities cap)
{
        struct pt_cap_desc *cd = &pt_caps[cap];
        u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
        unsigned int shift = __ffs(cd->mask);

        return (c & cd->mask) >> shift;
}
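/*
 * Worked example (illustrative values): for PT_CAP_mtc_periods the
 * descriptor is { .leaf = 1, .reg = CR_EAX, .mask = 0xffff0000 }, so if
 * the cached CPUID(0x14, 1).EAX reads, say, 0x02490000:
 *
 *      shift == __ffs(0xffff0000) == 16;
 *      return (0x02490000 & 0xffff0000) >> 16;  == 0x249, a bitmask of
 *                                                  valid MTC period codes
 */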
static ssize_t pt_cap_show(struct device *cdev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        enum pt_capabilities cap = (long)ea->var;

        return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}
static struct attribute_group pt_cap_group = {
        .name   = "caps",
};
PMU_FORMAT_ATTR(cyc,            "config:1"      );
PMU_FORMAT_ATTR(mtc,            "config:9"      );
PMU_FORMAT_ATTR(tsc,            "config:10"     );
PMU_FORMAT_ATTR(noretcomp,      "config:11"     );
PMU_FORMAT_ATTR(mtc_period,     "config:14-17"  );
PMU_FORMAT_ATTR(cyc_thresh,     "config:19-22"  );
PMU_FORMAT_ATTR(psb_period,     "config:24-27"  );
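/*
 * These format attributes let the perf tool spell config bits by name;
 * e.g. (hypothetical command line, assuming a PT-capable CPU):
 *
 *      perf record -e intel_pt/mtc=1,mtc_period=3,psb_period=1/u -- workload
 *
 * maps onto attr.config bits 9, 14-17 and 24-27 as declared above, and is
 * then validated against the hardware caps by pt_event_valid() below.
 */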
static struct attribute *pt_formats_attr[] = {
        &format_attr_cyc.attr,
        &format_attr_mtc.attr,
        &format_attr_tsc.attr,
        &format_attr_noretcomp.attr,
        &format_attr_mtc_period.attr,
        &format_attr_cyc_thresh.attr,
        &format_attr_psb_period.attr,
        NULL,
};
static struct attribute_group pt_format_group = {
        .name   = "format",
        .attrs  = pt_formats_attr,
};
static const struct attribute_group *pt_attr_groups[] = {
        &pt_cap_group,
        &pt_format_group,
        NULL,
};
static int __init pt_pmu_hw_init(void)
{
        struct dev_ext_attribute *de_attrs;
        struct attribute **attrs;
        size_t size;
        u64 reg;
        int ret;
        long i;
        if (boot_cpu_has(X86_FEATURE_VMX)) {
                /*
                 * Intel SDM, 36.5 "Tracing post-VMXON" says that
                 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
                 * post-VMXON.
                 */
                rdmsrl(MSR_IA32_VMX_MISC, reg);
                if (reg & BIT(14))
                        pt_pmu.vmx = true;
        }

        attrs = NULL;
        for (i = 0; i < PT_CPUID_LEAVES; i++) {
                cpuid_count(20, i,
                            &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
        }
        ret = -ENOMEM;
        size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
        attrs = kzalloc(size, GFP_KERNEL);
        if (!attrs)
                goto fail;

        size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
        de_attrs = kzalloc(size, GFP_KERNEL);
        if (!de_attrs)
                goto fail;
        for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
                struct dev_ext_attribute *de_attr = de_attrs + i;

                de_attr->attr.attr.name = pt_caps[i].name;

                sysfs_attr_init(&de_attr->attr.attr);

                de_attr->attr.attr.mode         = S_IRUGO;
                de_attr->attr.show              = pt_cap_show;
                de_attr->var                    = (void *)i;

                attrs[i] = &de_attr->attr.attr;
        }

        pt_cap_group.attrs = attrs;

        return 0;

fail:
        kfree(attrs);

        return ret;
}
#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC     | \
                          RTIT_CTL_CYC_THRESH   | \
                          RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC    (RTIT_CTL_MTC_EN        | \
                         RTIT_CTL_MTC_RANGE)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN         | \
                        RTIT_CTL_DISRETC        | \
                        RTIT_CTL_CYC_PSB        | \
                        RTIT_CTL_MTC)
static bool pt_event_valid(struct perf_event *event)
{
        u64 config = event->attr.config;
        u64 allowed, requested;

        if ((config & PT_CONFIG_MASK) != config)
                return false;

        if (config & RTIT_CTL_CYC_PSB) {
                if (!pt_cap_get(PT_CAP_psb_cyc))
                        return false;

                allowed = pt_cap_get(PT_CAP_psb_periods);
                requested = (config & RTIT_CTL_PSB_FREQ) >>
                        RTIT_CTL_PSB_FREQ_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;

                allowed = pt_cap_get(PT_CAP_cycle_thresholds);
                requested = (config & RTIT_CTL_CYC_THRESH) >>
                        RTIT_CTL_CYC_THRESH_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;
        }

        if (config & RTIT_CTL_MTC) {
                /*
                 * In the unlikely case that CPUID lists valid mtc periods,
                 * but not the mtc capability, drop out here.
                 *
                 * Spec says that setting mtc period bits while mtc bit in
                 * CPUID is 0 will #GP, so better safe than sorry.
                 */
                if (!pt_cap_get(PT_CAP_mtc))
                        return false;

                allowed = pt_cap_get(PT_CAP_mtc_periods);
                if (!allowed)
                        return false;

                requested = (config & RTIT_CTL_MTC_RANGE) >>
                        RTIT_CTL_MTC_RANGE_OFFSET;

                if (!(allowed & BIT(requested)))
                        return false;
        }

        return true;
}
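/*
 * Worked example (illustrative): suppose CPUID reports psb_periods ==
 * 0x3f, i.e. only period encodings 0-5 are valid. A user asking for
 * psb_period=6 yields requested == 6, and since (0x3f & BIT(6)) == 0,
 * the event is rejected (pt_event_init() below turns this into -EINVAL).
 */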
/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

static void pt_config(struct perf_event *event)
{
        u64 reg;
        if (!event->hw.itrace_started) {
                event->hw.itrace_started = 1;
                wrmsrl(MSR_IA32_RTIT_STATUS, 0);
        }

        reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

        if (!event->attr.exclude_kernel)
                reg |= RTIT_CTL_OS;
        if (!event->attr.exclude_user)
                reg |= RTIT_CTL_USR;

        reg |= (event->attr.config & PT_CONFIG_MASK);

        event->hw.config = reg;
        wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
static void pt_config_stop(struct perf_event *event)
{
        u64 ctl = READ_ONCE(event->hw.config);

        /* may be already stopped by a PMI */
        if (!(ctl & RTIT_CTL_TRACEEN))
                return;

        ctl &= ~RTIT_CTL_TRACEEN;
        wrmsrl(MSR_IA32_RTIT_CTL, ctl);

        WRITE_ONCE(event->hw.config, ctl);

        /*
         * A wrmsr that disables trace generation serializes other PT
         * registers and causes all data packets to be written to memory,
         * but a fence is required for the data to become globally visible.
         *
         * The below WMB, separating data store and aux_head store matches
         * the consumer's RMB that separates aux_head load and data load.
         */
        wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
                             unsigned int output_off)
{
        u64 reg;

        wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

        reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

        wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
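/*
 * The OUTPUT_MASK value programmed above packs three fields (per the SDM's
 * ToPA output scheme); an illustrative decode:
 *
 *      bits  6:0  - always 0x7f for ToPA output
 *      bits 31:7  - index of the current ToPA table entry
 *      bits 63:32 - byte offset within the current output region
 *
 * E.g. topa_idx == 2 and output_off == 0x1000 give
 * reg == (0x1000ULL << 32) | (2 << 7) | 0x7f == 0x000010000000017f.
 */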
/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
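/*
 * Worked example: with 4K pages and 8-byte ToPA entries this comes to
 * (4096 - 40) / 8 - 1 == 506 entries per table; the 40 bytes set aside
 * for the metadata members of struct topa below are double-checked by
 * the BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE) in pt_init().
 */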
/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:      actual ToPA table entries, as understood by PT hardware
 * @list:       linkage to struct pt_buffer's list of tables
 * @phys:       physical address of this page
 * @offset:     offset of the first entry in this table in the buffer
 * @size:       total size of all entries in this table
 * @last:       index of the last initialized entry in this table
 */
struct topa {
        struct topa_entry       table[TENTS_PER_PAGE];
        struct list_head        list;
        u64                     phys;
        u64                     offset;
        size_t                  size;
        int                     last;
};
/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
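/*
 * Example: TOPA_ENTRY(t, 1) is &t->table[1], while TOPA_ENTRY(t, -1) is
 * &t->table[t->last], i.e. the slot that topa_insert_pages() fills next.
 */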
/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:        CPU on which to allocate.
 * @gfp:        Allocation flags.
 *
 * Return:      On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
        int node = cpu_to_node(cpu);
        struct topa *topa;
        struct page *p;

        p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
        if (!p)
                return NULL;

        topa = page_address(p);
        topa->last = 0;
        topa->phys = page_to_phys(p);

        /*
         * In case of single-entry ToPA, always put the self-referencing END
         * link as the 2nd entry in the table
         */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
                TOPA_ENTRY(topa, 1)->end = 1;
        }

        return topa;
}
/**
 * topa_free() - free a page-sized ToPA table
 * @topa:       Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
        free_page((unsigned long)topa);
}
/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:        PT buffer that's being extended.
 * @topa:       New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa in the
 * current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
        struct topa *last = buf->last;

        list_add_tail(&topa->list, &buf->tables);

        if (!buf->first) {
                buf->first = buf->last = buf->cur = topa;
                return;
        }

        topa->offset = last->offset + last->size;
        buf->last = topa;

        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return;

        BUG_ON(last->last != TENTS_PER_PAGE - 1);

        TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
        TOPA_ENTRY(last, -1)->end = 1;
}
/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:       ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
        /* single-entry ToPA is a special case */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return !!topa->last;

        return topa->last == TENTS_PER_PAGE - 1;
}
/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:        PT buffer being initialized.
 * @gfp:        Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:      0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
        struct topa *topa = buf->last;
        int order = 0;
        struct page *p;

        p = virt_to_page(buf->data_pages[buf->nr_pages]);
        if (PagePrivate(p))
                order = page_private(p);

        if (topa_table_full(topa)) {
                topa = topa_alloc(buf->cpu, gfp);
                if (!topa)
                        return -ENOMEM;

                topa_insert_table(buf, topa);
        }

        TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
        TOPA_ENTRY(topa, -1)->size = order;
        if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, -1)->intr = 1;
                TOPA_ENTRY(topa, -1)->stop = 1;
        }

        topa->last++;
        topa->size += sizes(order);

        buf->nr_pages += 1ul << order;

        return 0;
}
/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:        PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
        struct topa *topa;

        list_for_each_entry(topa, &buf->tables, list) {
                int i;

                pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
                         topa->phys, topa->offset, topa->size);
                for (i = 0; i < TENTS_PER_PAGE; i++) {
                        pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
                                 &topa->table[i],
                                 (unsigned long)topa->table[i].base << TOPA_SHIFT,
                                 sizes(topa->table[i].size),
                                 topa->table[i].end ?  'E' : ' ',
                                 topa->table[i].intr ? 'I' : ' ',
                                 topa->table[i].stop ? 'S' : ' ',
                                 *(u64 *)&topa->table[i]);
                        if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
                             topa->table[i].stop) ||
                            topa->table[i].end)
                                break;
                }
        }
}
/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:        PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
        buf->output_off = 0;
        buf->cur_idx++;

        if (buf->cur_idx == buf->cur->last) {
                if (buf->cur == buf->last)
                        buf->cur = buf->first;
                else
                        buf->cur = list_entry(buf->cur->list.next, struct topa,
                                              list);
                buf->cur_idx = 0;
        }
}
/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:         Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        u64 topa_idx, base, old;

        /* offset of the first region in this table from the beginning of buf */
        base = buf->cur->offset + buf->output_off;

        /* offset of the current output region within this table */
        for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
                base += sizes(buf->cur->table[topa_idx].size);

        if (buf->snapshot) {
                local_set(&buf->data_size, base);
        } else {
                old = (local64_xchg(&buf->head, base) &
                       ((buf->nr_pages << PAGE_SHIFT) - 1));
                if (base < old)
                        base += buf->nr_pages << PAGE_SHIFT;

                local_add(base - old, &buf->data_size);
        }
}
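/*
 * Worked example (illustrative): in a 16-page (64k) buffer, if the
 * previously published head masked to old == 0xf000 and the new write
 * pointer computes to base == 0x1000, the hardware has wrapped; adding
 * nr_pages << PAGE_SHIFT gives base == 0x11000, so data_size grows by
 * 0x11000 - 0xf000 == 0x2000 bytes, as expected.
 */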
/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:        PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
        return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}
/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:        PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
        return sizes(buf->cur->table[buf->cur_idx].size);
}
/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:         Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        int advance = 0;
        u64 status;

        rdmsrl(MSR_IA32_RTIT_STATUS, status);

        if (status & RTIT_STATUS_ERROR) {
                pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
                pt_topa_dump(buf);
                status &= ~RTIT_STATUS_ERROR;
        }

        if (status & RTIT_STATUS_STOPPED) {
                status &= ~RTIT_STATUS_STOPPED;

                /*
                 * On systems that only do single-entry ToPA, hitting STOP
                 * means we are already losing data; need to let the decoder
                 * know.
                 */
                if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
                    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
                        local_inc(&buf->lost);
                        advance++;
                }
        }

        /*
         * Also on single-entry ToPA implementations, interrupt will come
         * before the output reaches its output region's boundary.
         */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
            pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
                void *head = pt_buffer_region(buf);

                /* everything within this margin needs to be zeroed out */
                memset(head + buf->output_off, 0,
                       pt_buffer_region_size(buf) -
                       buf->output_off);
                advance++;
        }

        if (advance)
                pt_buffer_advance(buf);

        wrmsrl(MSR_IA32_RTIT_STATUS, status);
}
/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:        PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
        u64 offset, base_topa;

        rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
        buf->cur = phys_to_virt(base_topa);

        rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
        /* offset within current output region */
        buf->output_off = offset >> 32;
        /* index of current output region within this table */
        buf->cur_idx = (offset & 0xffffff80) >> 7;
}
/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:        PT buffer.
 * @pg:         Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
        struct topa_entry *te = buf->topa_index[pg];

        /* one region */
        if (buf->first == buf->last && buf->first->last == 1)
                return pg;

        do {
                pg++;
                pg &= buf->nr_pages - 1;
        } while (buf->topa_index[pg] == te);

        return pg;
}
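/*
 * Example (illustrative): if buffer pages 0-3 back one 16k output region
 * and pages 4-5 the next one, then topa_index[0..3] all hold the same
 * topa_entry pointer, so a lookup starting at pg == 1 walks forward until
 * the pointer changes and returns pg == 4, the first page of the next
 * region.
 */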
/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:        PT buffer.
 * @handle:     Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain fraction of
 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
{
        unsigned long head = local64_read(&buf->head);
        unsigned long idx, npages, wakeup;

        /* can't stop in the middle of an output region */
        if (buf->output_off + handle->size + 1 <
            sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
                return -EINVAL;

        /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return 0;

        /* clear STOP and INT from current entry */
        buf->topa_index[buf->stop_pos]->stop = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;

        /* how many pages till the STOP marker */
        npages = handle->size >> PAGE_SHIFT;

        /* if it's on a page boundary, fill up one more page */
        if (!offset_in_page(head + handle->size + 1))
                npages++;

        idx = (head >> PAGE_SHIFT) + npages;
        idx &= buf->nr_pages - 1;
        buf->stop_pos = idx;

        wakeup = handle->wakeup >> PAGE_SHIFT;

        /* in the worst case, wake up the consumer one page before hard stop */
        idx = (head >> PAGE_SHIFT) + npages - 1;
        if (idx > wakeup)
                idx = wakeup;

        idx &= buf->nr_pages - 1;
        buf->intr_pos = idx;

        buf->topa_index[buf->stop_pos]->stop = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;

        return 0;
}
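/*
 * Worked example (illustrative, 4k pages): head == 0 and handle->size ==
 * 0x3fff give npages == 3, bumped to 4 because head + handle->size + 1 ==
 * 0x4000 is page-aligned; STOP then goes on page 4 and INT on page 3,
 * unless handle->wakeup asks for an earlier wakeup page.
 */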
/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:        PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
        struct topa *cur = buf->first, *prev = buf->last;
        struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
                *te_prev = TOPA_ENTRY(prev, prev->last - 1);
        int pg = 0, idx = 0;

        while (pg < buf->nr_pages) {
                int tidx;

                /* pages within one topa entry */
                for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
                        buf->topa_index[pg] = te_prev;

                te_prev = te_cur;

                if (idx == cur->last - 1) {
                        /* advance to next topa table */
                        idx = 0;
                        cur = list_entry(cur->list.next, struct topa, list);
                } else {
                        idx++;
                }
                te_cur = TOPA_ENTRY(cur, idx);
        }
}
/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:        PT buffer.
 * @head:       Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
        int pg;

        if (buf->snapshot)
                head &= (buf->nr_pages << PAGE_SHIFT) - 1;

        pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
        pg = pt_topa_next_entry(buf, pg);

        buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
        buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
                        (unsigned long)buf->cur) / sizeof(struct topa_entry);
        buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

        local64_set(&buf->head, head);
        local_set(&buf->data_size, 0);
}
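/*
 * The pointer arithmetic above relies on struct topa being page-sized and
 * page-aligned with table[] at offset 0: masking a topa_entry pointer with
 * PAGE_MASK recovers its table, and the remainder divided by
 * sizeof(struct topa_entry) is the entry index. E.g. a pointer at table
 * base + 0x28 with 8-byte entries means cur_idx == 5.
 */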
/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:        PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
        struct topa *topa, *iter;

        list_for_each_entry_safe(topa, iter, &buf->tables, list) {
                /*
                 * right now, this is in free_aux() path only, so
                 * no need to unlink this table from the list
                 */
                topa_free(topa);
        }
}
/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:        PT buffer.
 * @size:       Total size of all regions within this ToPA.
 * @gfp:        Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
                               gfp_t gfp)
{
        struct topa *topa;
        int err;

        topa = topa_alloc(buf->cpu, gfp);
        if (!topa)
                return -ENOMEM;

        topa_insert_table(buf, topa);

        while (buf->nr_pages < nr_pages) {
                err = topa_insert_pages(buf, gfp);
                if (err) {
                        pt_buffer_fini_topa(buf);
                        return -ENOMEM;
                }
        }

        pt_buffer_setup_topa_index(buf);

        /* link last table to the first one, unless we're double buffering */
        if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
                TOPA_ENTRY(buf->last, -1)->end = 1;
        }

        pt_topa_dump(buf);
        return 0;
}
/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:        Cpu on which to allocate, -1 means current.
 * @pages:      Array of pointers to buffer pages passed from perf core.
 * @nr_pages:   Number of pages in the buffer.
 * @snapshot:   If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:      Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
        struct pt_buffer *buf;
        int node, ret;

        if (!nr_pages)
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
                           GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->cpu = cpu;
        buf->snapshot = snapshot;
        buf->data_pages = pages;

        INIT_LIST_HEAD(&buf->tables);

        ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
        if (ret) {
                kfree(buf);
                return NULL;
        }

        return buf;
}
/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:       PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
        struct pt_buffer *buf = data;

        pt_buffer_fini_topa(buf);
        kfree(buf);
}
/**
 * pt_buffer_is_full() - check if the buffer is full
 * @buf:        PT buffer.
 * @pt:         Per-cpu pt handle.
 *
 * If the user hasn't read data from the output region that aux_head
 * points to, the buffer is considered full: the user needs to read at
 * least this region and update aux_tail to point past it.
 */
static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
{
        if (buf->snapshot)
                return false;

        if (local_read(&buf->data_size) >= pt->handle.size)
                return true;

        return false;
}
/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;
        struct perf_event *event = pt->handle.event;

        /*
         * There may be a dangling PT bit in the interrupt status register
         * after PT has been disabled by pt_event_stop(). Make sure we don't
         * do anything (particularly, re-enable) for this event here.
         */
        if (!ACCESS_ONCE(pt->handle_nmi))
                return;

        /*
         * If VMX is on and PT does not support it, don't touch anything.
         */
        if (READ_ONCE(pt->vmx_on))
                return;

        if (!event)
                return;

        pt_config_stop(event);

        buf = perf_get_aux(&pt->handle);
        if (!buf)
                return;

        pt_read_offset(buf);

        pt_handle_status(pt);

        pt_update_head(pt);

        perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
                            local_xchg(&buf->lost, 0));

        if (!event->hw.state) {
                int ret;

                buf = perf_aux_output_begin(&pt->handle, event);
                if (!buf) {
                        event->hw.state = PERF_HES_STOPPED;
                        return;
                }

                pt_buffer_reset_offsets(buf, pt->handle.head);
                /* snapshot counters don't use PMI, so it's safe */
                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret) {
                        perf_aux_output_end(&pt->handle, 0, true);
                        return;
                }

                pt_config_buffer(buf->cur->table, buf->cur_idx,
                                 buf->output_off);
                wrmsrl(MSR_IA32_RTIT_STATUS, 0);
                pt_config(event);
        }
}
void intel_pt_handle_vmx(int on)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct perf_event *event;
        unsigned long flags;

        /* PT plays nice with VMX, do nothing */
        if (pt_pmu.vmx)
                return;

        /*
         * VMXON will clear RTIT_CTL.TraceEn; we need to make
         * sure to not try to set it while VMX is on. Disable
         * interrupts to avoid racing with pmu callbacks;
         * concurrent PMI should be handled fine.
         */
        local_irq_save(flags);
        WRITE_ONCE(pt->vmx_on, on);

        if (on) {
                /* prevent pt_config_stop() from writing RTIT_CTL */
                event = pt->handle.event;
                if (event)
                        event->hw.config = 0;
        }
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
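/*
 * Illustrative usage (the actual callers live in the VMX code): the
 * hypervisor brackets the VMX-on region with
 *
 *      intel_pt_handle_vmx(1);     // VMXON is about to clear TraceEn
 *      ...
 *      intel_pt_handle_vmx(0);     // VMXOFF done, PT may be re-enabled
 */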
static void pt_event_start(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf = perf_get_aux(&pt->handle);

        if (READ_ONCE(pt->vmx_on))
                return;

        if (!buf || pt_buffer_is_full(buf, pt)) {
                event->hw.state = PERF_HES_STOPPED;
                return;
        }

        ACCESS_ONCE(pt->handle_nmi) = 1;
        event->hw.state = 0;

        pt_config_buffer(buf->cur->table, buf->cur_idx,
                         buf->output_off);
        pt_config(event);
}
static void pt_event_stop(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        /*
         * Protect against the PMI racing with disabling wrmsr,
         * see comment in intel_pt_interrupt().
         */
        ACCESS_ONCE(pt->handle_nmi) = 0;

        pt_config_stop(event);

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                struct pt_buffer *buf = perf_get_aux(&pt->handle);

                if (!buf)
                        return;

                if (WARN_ON_ONCE(pt->handle.event != event))
                        return;

                pt_read_offset(buf);

                pt_handle_status(pt);

                pt_update_head(pt);
        }
}
static void pt_event_del(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;

        pt_event_stop(event, PERF_EF_UPDATE);

        buf = perf_get_aux(&pt->handle);

        if (buf) {
                if (buf->snapshot)
                        pt->handle.head =
                                local_xchg(&buf->data_size,
                                           buf->nr_pages << PAGE_SHIFT);
                perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
                                    local_xchg(&buf->lost, 0));
        }
}
static int pt_event_add(struct perf_event *event, int mode)
{
        struct pt_buffer *buf;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct hw_perf_event *hwc = &event->hw;
        int ret = -EBUSY;

        if (pt->handle.event)
                goto fail;

        buf = perf_aux_output_begin(&pt->handle, event);
        ret = -EINVAL;
        if (!buf)
                goto fail_stop;

        pt_buffer_reset_offsets(buf, pt->handle.head);
        if (!buf->snapshot) {
                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret)
                        goto fail_end_stop;
        }

        if (mode & PERF_EF_START) {
                pt_event_start(event, 0);
                ret = -EBUSY;
                if (hwc->state == PERF_HES_STOPPED)
                        goto fail_end_stop;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        return 0;

fail_end_stop:
        perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
        hwc->state = PERF_HES_STOPPED;
fail:
        return ret;
}
static void pt_event_read(struct perf_event *event)
{
}
static void pt_event_destroy(struct perf_event *event)
{
        x86_del_exclusive(x86_lbr_exclusive_pt);
}
static int pt_event_init(struct perf_event *event)
{
        if (event->attr.type != pt_pmu.pmu.type)
                return -ENOENT;

        if (!pt_event_valid(event))
                return -EINVAL;

        if (x86_add_exclusive(x86_lbr_exclusive_pt))
                return -EBUSY;

        event->destroy = pt_event_destroy;

        return 0;
}
void cpu_emergency_stop_pt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        if (pt->handle.event)
                pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}
static __init int pt_init(void)
{
        int ret, cpu, prior_warn = 0;

        BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

        if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
                return -ENODEV;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                u64 ctl;

                ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
        put_online_cpus();

        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
                pr_warn("PT is enabled at boot time, doing nothing\n");

                return -EBUSY;
        }

        ret = pt_pmu_hw_init();
        if (ret)
                return ret;

        if (!pt_cap_get(PT_CAP_topa_output)) {
                pr_warn("ToPA output is not supported on this CPU\n");
                return -ENODEV;
        }

        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                pt_pmu.pmu.capabilities =
                        PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

        pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
        pt_pmu.pmu.attr_groups  = pt_attr_groups;
        pt_pmu.pmu.task_ctx_nr  = perf_sw_context;
        pt_pmu.pmu.event_init   = pt_event_init;
        pt_pmu.pmu.add          = pt_event_add;
        pt_pmu.pmu.del          = pt_event_del;
        pt_pmu.pmu.start        = pt_event_start;
        pt_pmu.pmu.stop         = pt_event_stop;
        pt_pmu.pmu.read         = pt_event_read;
        pt_pmu.pmu.setup_aux    = pt_buffer_setup_aux;
        pt_pmu.pmu.free_aux     = pt_buffer_free_aux;
        ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

        return ret;
}
arch_initcall(pt_init);