// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/intel-family.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as "caps"
 * attribute group of pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary for both trace decoding (payloads_lip, contains address
 * width encoded in IP-related packets), and event configuration (bitmasks with
 * permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CPUID_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CPUID_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CPUID_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CPUID_EBX, BIT(2)),
	PT_CAP(mtc,			0, CPUID_EBX, BIT(3)),
	PT_CAP(ptwrite,			0, CPUID_EBX, BIT(4)),
	PT_CAP(power_event_trace,	0, CPUID_EBX, BIT(5)),
	PT_CAP(topa_output,		0, CPUID_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CPUID_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CPUID_ECX, BIT(2)),
	PT_CAP(output_subsys,		0, CPUID_ECX, BIT(3)),
	PT_CAP(payloads_lip,		0, CPUID_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CPUID_EAX, 0x7),
	PT_CAP(mtc_periods,		1, CPUID_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CPUID_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CPUID_EBX, 0xffff0000),
};

u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
	struct pt_cap_desc *cd = &pt_caps[capability];
	u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
EXPORT_SYMBOL_GPL(intel_pt_validate_cap);

u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
	return intel_pt_validate_cap(pt_pmu.caps, cap);
}
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
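
/*
 * Worked example of the lookup above (illustrative; the values come from
 * the pt_caps[] table): PT_CAP_mtc_periods lives in CPUID 0x14 sub-leaf 1,
 * EAX, bits 31:16 (mask 0xffff0000). intel_pt_validate_cap() reads
 * caps[1 * PT_CPUID_REGS_NUM + CPUID_EAX], masks it with 0xffff0000 and
 * shifts right by __ffs(0xffff0000) == 16, yielding the 16-bit bitmask of
 * supported MTC periods. Userspace sees the same value in
 * /sys/bus/event_source/devices/intel_pt/caps/mtc_periods.
 */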

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}

static struct attribute_group pt_cap_group __ro_after_init = {
	.name	= "caps",
};

PMU_FORMAT_ATTR(pt,		"config:0"	);
PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(pwr_evt,	"config:4"	);
PMU_FORMAT_ATTR(fup_on_ptw,	"config:5"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(ptw,		"config:12"	);
PMU_FORMAT_ATTR(branch,		"config:13"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);

static struct attribute *pt_formats_attr[] = {
	&format_attr_pt.attr,
	&format_attr_cyc.attr,
	&format_attr_pwr_evt.attr,
	&format_attr_fup_on_ptw.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_ptw.attr,
	&format_attr_branch.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	default:
		break;
	}

	return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};

static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};

static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	int ret;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * if available, read in TSC to core crystal clock ratio,
	 * otherwise, zero for numerator stands for "not enumerated"
	 * as per SDM
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}

	/* model-specific quirks */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
		/* not setting BRANCH_EN will #GP, erratum BDM106 */
		pt_pmu.branch_en_always_on = true;
		break;
	default:
		break;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW	(RTIT_CTL_PTW_EN	| \
			 RTIT_CTL_FUP_ON_PTW)

/*
 * Bit 0 (TraceEn) in the attr.config is meaningless as the
 * corresponding bit in the RTIT_CTL can only be controlled
 * by the driver; therefore, repurpose it to mean: pass
 * through the bit that was previously assumed to be always
 * on for PT, thereby allowing the user to *not* set it if
 * they so wish. See also pt_event_valid() and pt_config().
 */
#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN

#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN	| \
			RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_BRANCH_EN	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC		| \
			RTIT_CTL_PWR_EVT_EN	| \
			RTIT_CTL_FUP_ON_PTW	| \
			RTIT_CTL_PTW_EN)
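
/*
 * A sketch of how the format bits above meet the capability bitmasks
 * (the numbers are made up for illustration): a user requesting
 * mtc_period=3 sets attr.config bits 14-17 to 3; pt_event_valid() below
 * computes requested = (config & RTIT_CTL_MTC_RANGE) >>
 * RTIT_CTL_MTC_RANGE_OFFSET == 3 and accepts it only if BIT(3) is set in
 * the mtc_periods capability. The psb_period and cyc_thresh fields are
 * checked the same way against psb_periods and cycle_thresholds.
 */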

static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	if (config & RTIT_CTL_PWR_EVT_EN &&
	    !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
		return false;

	if (config & RTIT_CTL_PTW) {
		if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
			return false;

		/* FUPonPTW without PTW doesn't make sense */
		if ((config & RTIT_CTL_FUP_ON_PTW) &&
		    !(config & RTIT_CTL_PTW_EN))
			return false;
	}

	/*
	 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
	 * clears the assumption that BranchEn must always be enabled,
	 * as was the case with the first implementation of PT.
	 * If this bit is not set, the legacy behavior is preserved
	 * for compatibility with the older userspace.
	 *
	 * Re-using bit 0 for this purpose is fine because it is never
	 * directly set by the user; previous attempts at setting it in
	 * the attr.config resulted in -EINVAL.
	 */
	if (config & RTIT_CTL_PASSTHROUGH) {
		/*
		 * Disallow not setting BRANCH_EN where BRANCH_EN is
		 * always required.
		 */
		if (pt_pmu.branch_en_always_on &&
		    !(config & RTIT_CTL_BRANCH_EN))
			return false;
	} else {
		/*
		 * Disallow BRANCH_EN without the PASSTHROUGH.
		 */
		if (config & RTIT_CTL_BRANCH_EN)
			return false;
	}

	return true;
}

/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

static void pt_config_start(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = event->hw.config;

	ctl |= RTIT_CTL_TRACEEN;
	if (READ_ONCE(pt->vmx_on))
		perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
	else
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);
}

/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};

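/*
 * The ->config value shifted into the per-range ADDRn_CFG fields of
 * RTIT_CTL by pt_config_filters() below follows the SDM encoding:
 * 0 disables the range, 1 uses it as a filter (trace only inside it),
 * 2 uses it as a TraceStop region. pt_event_addr_filters_sync() picks
 * 1 or 2 based on the perf filter action.
 */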
static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program a zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

		rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}

static void pt_config(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 reg;

	/* First round: clear STATUS, in particular the PSB byte counter. */
	if (!event->hw.config) {
		perf_event_itrace_started(event);
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TRACEEN;
	if (!buf->single)
		reg |= RTIT_CTL_TOPA;

	/*
	 * Previously, we had BRANCH_EN on by default, but now that PT has
	 * grown features outside of branch tracing, it is useful to allow
	 * the user to disable it. Setting bit 0 in the event's attr.config
	 * allows BRANCH_EN to pass through instead of being always on. See
	 * also the comment in pt_event_valid().
	 */
	if (event->attr.config & BIT(0)) {
		reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
	} else {
		reg |= RTIT_CTL_BRANCH_EN;
	}

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.config = reg;
	pt_config_start(event);
}

static void pt_config_stop(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = READ_ONCE(event->hw.config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	if (!READ_ONCE(pt->vmx_on))
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	wmb();
}

/**
 * struct topa - ToPA metadata
 * @list:	linkage to struct pt_buffer's list of tables
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 * @z_count:	how many times the first entry repeats
 */
struct topa {
	struct list_head	list;
	u64			offset;
	size_t			size;
	int			last;
	unsigned int		z_count;
};

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE	\
	((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))

/**
 * struct topa_page - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @topa:	metadata
 */
struct topa_page {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct topa		topa;
};

static inline struct topa_page *topa_to_page(struct topa *topa)
{
	return container_of(topa, struct topa_page, topa);
}

static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
{
	return (struct topa_page *)((unsigned long)te & PAGE_MASK);
}

static inline phys_addr_t topa_pfn(struct topa *topa)
{
	return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
}

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i)				\
	((i) == -1					\
		? &topa_to_page(t)->table[(t)->last]	\
		: &topa_to_page(t)->table[(i)])
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)

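/*
 * A note on the layout programmed by pt_config_buffer() below, per the
 * SDM's description of IA32_RTIT_OUTPUT_MASK_PTRS: bits 6:0 are always
 * set, bits 31:7 hold either the current ToPA table offset (cur_idx) or,
 * in single range mode, the size mask of the output region, and bits
 * 63:32 hold the offset within the current output region.
 */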
static void pt_config_buffer(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 reg, mask;
	void *base;

	if (buf->single) {
		base = buf->data_pages[0];
		mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
	} else {
		base = topa_to_page(buf->cur)->table;
		mask = (u64)buf->cur_idx;
	}

	reg = virt_to_phys(base);
	if (pt->output_base != reg) {
		pt->output_base = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
	}

	reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
	if (pt->output_mask != reg) {
		pt->output_mask = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
	}
}

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa_page *tp;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	tp = page_address(p);
	tp->topa.last = 0;

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
		TOPA_ENTRY(&tp->topa, 1)->end = 1;
	}

	return &tp->topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa
 * in the current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	if (topa->z_count == topa->last - 1) {
		if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
			topa->z_count++;
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot &&
	    !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		struct topa_page *tp = topa_to_page(topa);
		int i;

		pr_debug("# table @%p, off %llx size %zx\n", tp->table,
			 topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &tp->table[i],
				 (unsigned long)tp->table[i].base << TOPA_SHIFT,
				 sizes(tp->table[i].size),
				 tp->table[i].end ?  'E' : ' ',
				 tp->table[i].intr ? 'I' : ' ',
				 tp->table[i].stop ? 'S' : ' ',
				 *(u64 *)&tp->table[i]);
			if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
			     tp->table[i].stop) ||
			    tp->table[i].end)
				break;
			if (!i && topa->z_count)
				i += topa->z_count;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

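/*
 * A small worked example of the wraparound arithmetic in pt_update_head()
 * below (the numbers are made up): with a 64KiB buffer, an old masked head
 * of 0xf000 and a new write pointer of 0x1000, the trace has wrapped, so
 * base is bumped by the buffer size to 0x11000 and data_size grows by
 * 0x11000 - 0xf000 = 0x2000 bytes instead of going negative.
 */
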
/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:	Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	if (buf->single) {
		local_set(&buf->data_size, buf->output_off);
		return;
	}

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:	Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!buf->single &&
		    (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
		     buf->output_off == pt_buffer_region_size(buf))) {
			perf_aux_output_flag(&pt->handle,
					     PERF_AUX_FLAG_TRUNCATED);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, the interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
	    !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct topa_page *tp;

	if (!buf->single) {
		rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
		tp = phys_to_virt(pt->output_base);
		buf->cur = &tp->topa;
	}

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
	/* offset within current output region */
	buf->output_off = pt->output_mask >> 32;
	/* index of current output region within this table */
	if (!buf->single)
		buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
}

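/*
 * Example of the z_count fast path in the lookup below (a hypothetical
 * buffer): if a table starts with sixteen identical order-2 entries
 * (4 pages each), z_count is 15, so any page within the first
 * z_pg = 4 * 16 = 64 pages is located with one division instead of
 * walking the entries one by one.
 */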
static struct topa_entry *
pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_page *tp;
	struct topa *topa;
	unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;

	/*
	 * Indicates a bug in the caller.
	 */
	if (WARN_ON_ONCE(pg >= buf->nr_pages))
		return NULL;

	/*
	 * First, find the ToPA table where @pg fits. With high
	 * order allocations, there shouldn't be many of these.
	 */
	list_for_each_entry(topa, &buf->tables, list) {
		if (topa->offset + topa->size > pg << PAGE_SHIFT)
			goto found;
	}

	/*
	 * Hitting this means we have a problem in the ToPA
	 * allocation code.
	 */
	WARN_ON_ONCE(1);

	return NULL;

found:
	/*
	 * Indicates a problem in the ToPA allocation code.
	 */
	if (WARN_ON_ONCE(topa->last == -1))
		return NULL;

	tp = topa_to_page(topa);
	cur_pg = PFN_DOWN(topa->offset);
	if (topa->z_count) {
		z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
		start_idx = topa->z_count + 1;
	}

	/*
	 * Multiple entries at the beginning of the table have the same size,
	 * ideally all of them; if @pg falls there, the search is done.
	 */
	if (pg >= cur_pg && pg < cur_pg + z_pg) {
		idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
		return &tp->table[idx];
	}

	/*
	 * Otherwise, slow path: iterate through the remaining entries.
	 */
	for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
		if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
			return &tp->table[idx];

		cur_pg += TOPA_ENTRY_PAGES(topa, idx);
	}

	/*
	 * Means we couldn't find a matching ToPA entry in the table.
	 */
	WARN_ON_ONCE(1);

	return NULL;
}

static struct topa_entry *
pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
{
	unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
	struct topa_page *tp;
	struct topa *topa;

	tp = (struct topa_page *)table;
	if (tp->table != te)
		return --te;

	topa = &tp->topa;
	if (topa == buf->first)
		topa = buf->last;
	else
		topa = list_prev_entry(topa, list);

	tp = topa_to_page(topa);

	return &tp->table[topa->last - 1];
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and to wake up the consumer after a certain fraction
 * of the buffer has filled up. Only needed and sensible for non-snapshot
 * counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)

{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	if (buf->single)
		return 0;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		return -EINVAL;
	}

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	if (buf->stop_te) {
		buf->stop_te->stop = 0;
		buf->stop_te->intr = 0;
	}

	if (buf->intr_te)
		buf->intr_te->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;

	if (idx != buf->stop_pos) {
		buf->stop_pos = idx;
		buf->stop_te = pt_topa_entry_for_page(buf, idx);
		buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
	}

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	if (idx != buf->intr_pos) {
		buf->intr_pos = idx;
		buf->intr_te = pt_topa_entry_for_page(buf, idx);
		buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
	}

	buf->stop_te->stop = 1;
	buf->stop_te->intr = 1;
	buf->intr_te->intr = 1;

	return 0;
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	struct topa_page *cur_tp;
	struct topa_entry *te;
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	if (!buf->single) {
		pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
		te = pt_topa_entry_for_page(buf, pg);

		cur_tp = topa_entry_to_page(te);
		buf->cur = &cur_tp->topa;
		buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
		buf->output_off = head & (pt_buffer_region_size(buf) - 1);
	} else {
		buf->output_off = head;
	}

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	if (buf->single)
		return;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @cpu:	CPU on which to allocate.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
			       unsigned long nr_pages, gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, cpu, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	/* link last table to the first one, unless we're double buffering */
	if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}

static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
{
	struct page *p = virt_to_page(buf->data_pages[0]);
	int ret = -ENOTSUPP, order = 0;

	/*
	 * We can use single range output mode
	 * + in snapshot mode, where we don't need interrupts;
	 * + if the hardware supports it;
	 * + if the entire buffer is one contiguous allocation.
	 */
	if (!buf->snapshot)
		goto out;

	if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
		goto out;

	if (PagePrivate(p))
		order = page_private(p);

	if (1 << order != nr_pages)
		goto out;

	buf->single = true;
	buf->nr_pages = nr_pages;
	ret = 0;
out:
	return ret;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @event:	Performance event the buffer is being set up for.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(struct perf_event *event, void **pages,
		    int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret, cpu = event->cpu;

	if (!nr_pages)
		return NULL;

	/*
	 * Only support AUX sampling in snapshot mode, where we don't
	 * generate NMIs.
	 */
	if (event->attr.aux_sample_size && !snapshot)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = snapshot;
	buf->data_pages = pages;
	buf->stop_pos = -1;
	buf->intr_pos = -1;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_try_single(buf, nr_pages);
	if (!ret)
		return buf;

	ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}

static int pt_addr_filters_init(struct perf_event *event)
{
	struct pt_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
		return 0;

	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static inline bool valid_kernel_ip(unsigned long ip)
{
	return virt_addr_valid(ip) && kernel_ip(ip);
}

static int pt_event_addr_filters_validate(struct list_head *filters)
{
	struct perf_addr_filter *filter;
	int range = 0;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * PT doesn't support single address triggers and
		 * 'start' filters.
		 */
		if (!filter->size ||
		    filter->action == PERF_ADDR_FILTER_ACTION_START)
			return -EOPNOTSUPP;

		if (!filter->path.dentry) {
			if (!valid_kernel_ip(filter->offset))
				return -EINVAL;

			if (!valid_kernel_ip(filter->offset + filter->size))
				return -EINVAL;
		}

		if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
			return -EOPNOTSUPP;
	}

	return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long msr_a, msr_b;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct pt_filters *filters = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	int range = 0;

	if (!filters)
		return;

	list_for_each_entry(filter, &head->list, entry) {
		if (filter->path.dentry && !fr[range].start) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
			msr_a = fr[range].start;
			msr_b = msr_a + fr[range].size - 1;
		}

		filters->filter[range].msr_a = msr_a;
		filters->filter[range].msr_b = msr_b;
		if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
			filters->filter[range].config = 1;
		else
			filters->filter[range].config = 2;
		range++;
	}

	filters->nr_filters = range;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!READ_ONCE(pt->handle_nmi))
		return;

	if (!event)
		return;

	pt_config_stop(event);

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0);
			return;
		}

		pt_config_buffer(buf);
		pt_config_start(event);
	}
}

void intel_pt_handle_vmx(int on)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct perf_event *event;
	unsigned long flags;

	/* PT plays nice with VMX, do nothing */
	if (pt_pmu.vmx)
		return;

	/*
	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
	 * sure to not try to set it while VMX is on. Disable
	 * interrupts to avoid racing with pmu callbacks;
	 * concurrent PMI should be handled fine.
	 */
	local_irq_save(flags);
	WRITE_ONCE(pt->vmx_on, on);

	/*
	 * If an AUX transaction is in progress, it will contain
	 * gap(s), so flag it PARTIAL to inform the user.
	 */
	event = pt->handle.event;
	if (event)
		perf_aux_output_flag(&pt->handle,
				     PERF_AUX_FLAG_PARTIAL);

	/* Turn PTs back on */
	if (!on && event)
		wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		if (pt_buffer_reset_markers(buf, &pt->handle))
			goto fail_end_stop;
	}

	WRITE_ONCE(pt->handle_nmi, 1);
	hwc->state = 0;

	pt_config_buffer(buf);
	pt_config(event);

	return;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	WRITE_ONCE(pt->handle_nmi, 0);

	pt_config_stop(event);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);

		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
	}
}

static long pt_event_snapshot_aux(struct perf_event *event,
				  struct perf_output_handle *handle,
				  unsigned long size)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	unsigned long from = 0, to;
	long ret;

	if (WARN_ON_ONCE(!buf))
		return 0;

	/*
	 * Sampling is only allowed on snapshot events;
	 * see pt_buffer_setup_aux().
	 */
	if (WARN_ON_ONCE(!buf->snapshot))
		return 0;

	/*
	 * Here, handle_nmi tells us if the tracing is on
	 */
	if (READ_ONCE(pt->handle_nmi))
		pt_config_stop(event);

	pt_read_offset(buf);
	pt_update_head(pt);

	to = local_read(&buf->data_size);
	if (to < size)
		from = buf->nr_pages << PAGE_SHIFT;
	from += to - size;

	ret = perf_output_copy_aux(&pt->handle, handle, from, to);

	/*
	 * If the tracing was on when we turned up, restart it.
	 * Compiler barrier not needed as we couldn't have been
	 * preempted by anything that touches pt->handle_nmi.
	 */
	if (pt->handle_nmi)
		pt_config_start(event);

	return ret;
}

static void pt_event_del(struct perf_event *event, int mode)
{
	pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EINVAL;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
fail:
	return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
	pt_addr_filters_fini(event);
	x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	if (pt_addr_filters_init(event)) {
		x86_del_exclusive(x86_lbr_exclusive_pt);
		return -ENOMEM;
	}

	event->destroy = pt_event_destroy;

	return 0;
}

void cpu_emergency_stop_pt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	if (pt->handle.event)
		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

int is_intel_pt_event(struct perf_event *event)
{
	return event->pmu == &pt_pmu.pmu;
}

static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		return -ENODEV;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	cpus_read_unlock();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;

	pt_pmu.pmu.capabilities		|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
	pt_pmu.pmu.event_init		 = pt_event_init;
	pt_pmu.pmu.add			 = pt_event_add;
	pt_pmu.pmu.del			 = pt_event_del;
	pt_pmu.pmu.start		 = pt_event_start;
	pt_pmu.pmu.stop			 = pt_event_stop;
	pt_pmu.pmu.snapshot_aux		 = pt_event_snapshot_aux;
	pt_pmu.pmu.read			 = pt_event_read;
	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
	pt_pmu.pmu.addr_filters_sync	 = pt_event_addr_filters_sync;
	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
	pt_pmu.pmu.nr_addr_filters	 =
		intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);

	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);