/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/intel_pt.h>

#include "../perf_event.h"
static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;
/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the intel_pt PMU device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary both for trace decoding (payloads_lip, for instance,
 * affects how addresses in IP-related packets are to be interpreted) and for
 * event configuration (bitmasks of permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }
static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CR_EBX, BIT(2)),
	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CR_EAX, 0x3),
	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
};
static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
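/*
 * Illustration of the lookup above: per the pt_caps[] table, mtc_periods is
 * declared as leaf 1, CR_EAX, mask 0xffff0000, so pt_cap_get(PT_CAP_mtc_periods)
 * takes bits 31:16 of CPUID leaf 0x14, sub-leaf 1, EAX and shifts them right
 * by __ffs(0xffff0000) == 16.
 */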
static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}
static struct attribute_group pt_cap_group = {
	.name	= "caps",
};
PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);
static struct attribute *pt_formats_attr[] = {
	&format_attr_cyc.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};
static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};
static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	}

	return -EINVAL;
}
PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);
static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};
static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};
static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};
static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * If available, read the TSC to core crystal clock ratio;
	 * otherwise a zero numerator stands for "not enumerated".
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
	}

	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;
}
#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC)
static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!pt_cap_get(PT_CAP_psb_cyc))
			return false;

		allowed = pt_cap_get(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!pt_cap_get(PT_CAP_mtc))
			return false;

		allowed = pt_cap_get(PT_CAP_mtc_periods);

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	return true;
}
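/*
 * Illustration: the *_periods/*_thresholds capabilities above are bitmasks of
 * supported values rather than maxima.  E.g. if PT_CAP_psb_periods reads 0x3f,
 * only psb_period values 0-5 pass the "allowed & BIT(requested)" test; a
 * requested PSB frequency of 0 is accepted regardless.
 */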
/*
 * PT configuration helpers
 *
 * These all are cpu affine and operate on a local PT
 */

/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};
static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

		rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}
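/*
 * Note on the value accumulated above: filter->config is set up by
 * pt_event_addr_filters_sync() below as 1 for a "filter" range or 2 for a
 * "stop" range, and is shifted into the per-range address configuration
 * field of RTIT_CTL via pt_address_ranges[range].reg_off.
 */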
static void pt_config(struct perf_event *event)
{
	u64 reg;

	if (!event->hw.itrace_started) {
		event->hw.itrace_started = 1;
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.config = reg;
	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
static void pt_config_stop(struct perf_event *event)
{
	u64 ctl = READ_ONCE(event->hw.config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store, matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
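/*
 * Layout of the MSR_IA32_RTIT_OUTPUT_MASK value written above (it matches the
 * decoding in pt_read_offset()): the low 7 bits are written as 0x7f, bits
 * 31:7 hold the index of the current ToPA table entry and bits 63:32 hold the
 * byte offset within that entry's output region.  E.g. (illustrative)
 * topa_idx == 2 and output_off == 0x1000 yields 0x000010000000017f.
 */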
/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
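/*
 * Worked example (assuming 4KiB pages and 8-byte ToPA entries):
 * TENTS_PER_PAGE = ((4096 - 40) / 8) - 1 = 506 entries per table, with the
 * rest of the page left for struct topa's bookkeeping fields below.
 */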
/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};
/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}
/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}
/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa to the
 * current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}
/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}
/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}
/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ?  'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}
/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}
/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}
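/*
 * Illustration of the non-snapshot wraparound handling above: with a 64-page
 * (256KiB) AUX buffer, if the old masked head was 0x3f000 and the new base
 * wrapped to 0x1000, base is first bumped by 64 << PAGE_SHIFT so that
 * base - old correctly accounts for the 0x2000 bytes written across the wrap.
 */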
/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}
/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}
/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}
/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}
/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:		Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}
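/*
 * In other words: starting from @pg, scan forward (wrapping at nr_pages)
 * until topa_index[] points at a different ToPA entry; that page is the first
 * one backed by the next output region.
 */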
/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain fraction of
 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->stop_pos]->intr = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;
	buf->stop_pos = idx;

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->stop_pos]->intr = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}
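/*
 * Example (illustrative): with aux_head on page 10 and handle->size covering
 * four more pages, the STOP bit lands on the entry backing page 14 and the
 * INT bit one page earlier (or on the page matching handle->wakeup, whichever
 * comes first), so the consumer is woken up before tracing hard-stops.
 */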
/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0;

	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
		} else {
			idx++;
		}
		te_cur = TOPA_ENTRY(cur, idx);
	}
}
/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);

	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}
/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}
/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return err;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	return 0;
}
/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	Cpu on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}
/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}
static int pt_addr_filters_init(struct perf_event *event)
{
	struct pt_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	if (!pt_cap_get(PT_CAP_num_address_ranges))
		return -EOPNOTSUPP;

	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}
static void pt_addr_filters_fini(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}
static int pt_event_addr_filters_validate(struct list_head *filters)
{
	struct perf_addr_filter *filter;
	int range = 0;

	list_for_each_entry(filter, filters, entry) {
		/* PT doesn't support single address triggers */
		if (!filter->range || !filter->size)
			return -EOPNOTSUPP;

		if (!filter->inode && !kernel_ip(filter->offset))
			return -EINVAL;

		if (++range > pt_cap_get(PT_CAP_num_address_ranges))
			return -EOPNOTSUPP;
	}

	return 0;
}
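/*
 * The range count checked above comes from the num_address_ranges capability
 * (CPUID leaf 0x14, sub-leaf 1, EAX bits 1:0 per the pt_caps[] table), so the
 * driver refuses filter sets larger than what the hardware enumerates.
 */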
static void pt_event_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
	struct pt_filters *filters = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	int range = 0;

	if (!filters)
		return;

	list_for_each_entry(filter, &head->list, entry) {
		if (filter->inode && !offs[range]) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
			msr_a = filter->offset + offs[range];
			msr_b = filter->size + msr_a;
		}

		filters->filter[range].msr_a  = msr_a;
		filters->filter[range].msr_b  = msr_b;
		filters->filter[range].config = filter->filter ? 1 : 2;
		range++;
	}

	filters->nr_filters = range;
}
/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!READ_ONCE(pt->handle_nmi))
		return;

	/*
	 * If VMX is on and PT does not support it, don't touch anything.
	 */
	if (READ_ONCE(pt->vmx_on))
		return;

	if (!event)
		return;

	pt_config_stop(event);

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		pt_config(event);
	}
}
void intel_pt_handle_vmx(int on)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct perf_event *event;
	unsigned long flags;

	/* PT plays nice with VMX, do nothing */
	if (pt_pmu.vmx)
		return;

	/*
	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
	 * sure to not try to set it while VMX is on. Disable
	 * interrupts to avoid racing with pmu callbacks;
	 * concurrent PMI should be handled fine.
	 */
	local_irq_save(flags);
	WRITE_ONCE(pt->vmx_on, on);

	if (on) {
		/* prevent pt_config_stop() from writing RTIT_CTL */
		event = pt->handle.event;
		if (event)
			event->hw.config = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
static void pt_event_start(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	if (READ_ONCE(pt->vmx_on))
		return;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		if (pt_buffer_reset_markers(buf, &pt->handle))
			goto fail_end_stop;
	}

	WRITE_ONCE(pt->handle_nmi, 1);
	hwc->state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	pt_config(event);

	return;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
}
static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	WRITE_ONCE(pt->handle_nmi, 0);

	pt_config_stop(event);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);

		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}
static void pt_event_del(struct perf_event *event, int mode)
{
	pt_event_stop(event, PERF_EF_UPDATE);
}
static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EINVAL;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
fail:
	return ret;
}
static void pt_event_read(struct perf_event *event)
{
}
static void pt_event_destroy(struct perf_event *event)
{
	pt_addr_filters_fini(event);
	x86_del_exclusive(x86_lbr_exclusive_pt);
}
static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	if (pt_addr_filters_init(event)) {
		x86_del_exclusive(x86_lbr_exclusive_pt);
		return -ENOMEM;
	}

	event->destroy = pt_event_destroy;

	return 0;
}
void cpu_emergency_stop_pt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	if (pt->handle.event)
		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}
static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		return -ENODEV;

	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities		|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
	pt_pmu.pmu.event_init		 = pt_event_init;
	pt_pmu.pmu.add			 = pt_event_add;
	pt_pmu.pmu.del			 = pt_event_del;
	pt_pmu.pmu.start		 = pt_event_start;
	pt_pmu.pmu.stop			 = pt_event_stop;
	pt_pmu.pmu.read			 = pt_event_read;
	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
	pt_pmu.pmu.addr_filters_sync	 = pt_event_addr_filters_sync;
	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
	pt_pmu.pmu.nr_addr_filters	 =
		pt_cap_get(PT_CAP_num_address_ranges);

	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);