// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

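/*
 * Operations for enabling, disabling and freeing the PMU interrupt on a CPU.
 * Separate implementations exist for normal IRQs, NMIs, and their
 * percpu_devid equivalents.
 */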
struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static bool has_nmi;

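/*
 * Report the counter width for this event: 64-bit if the driver flagged it
 * with ARMPMU_EVT_64BIT, 32-bit otherwise.
 */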
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else
		return GENMASK_ULL(31, 0);
}

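/*
 * Decode a PERF_TYPE_HW_CACHE config value (cache type, op and result packed
 * into the low 24 bits) and look up the PMU-specific event encoding.
 */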
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

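/*
 * Re-arm the counter: program a value that overflows once the remaining
 * sample period has elapsed, clamped to half the counter width so that
 * interrupt latency cannot let the new count overtake it.
 */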
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

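/*
 * Read the hardware counter and fold the delta since the previous read into
 * event->count, retrying if an interrupt updates prev_count underneath us.
 */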
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

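/*
 * pmu::del callback: stop the event, release its counter and let the
 * back-end clear any state it keyed off the counter index.
 */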
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

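/*
 * Group validation: check on a fake, empty PMU that every hardware event in
 * the proposed group could be given a counter at the same time.
 */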
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

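/*
 * Common interrupt entry point: hand off to the PMU driver's handle_irq()
 * and report how long the handler ran so perf can throttle sampling.
 */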
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

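/*
 * PMU-specific event initialisation: map the event to a hardware encoding,
 * apply any mode filters and pick a default sample period.
 */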
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter &&
	    armpmu->set_event_filter(hwc, &event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	if (ret && armpmu->filter_match)
		return armpmu->filter_match(event);

	return ret;
}

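/* sysfs "cpus" attribute: the CPUs this PMU instance supports. */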
static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static const struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

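/*
 * Count how many CPUs reference this interrupt, so shared percpu_devid IRQs
 * are only requested and freed once.
 */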
static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

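/*
 * Request the PMU interrupt for one CPU, preferring an NMI and falling back
 * to a normal interrupt, and record which pmu_irq_ops variant manages it.
 */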
int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING |
			    IRQF_NO_THREAD;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* Per cpudevid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

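/*
 * CPU PM notifier: stop and save the counters before entering a low-power
 * state, then reset the PMU and restart them on the way back out.
 */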
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

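/*
 * Allocate an arm_pmu together with its per-CPU hardware event state and
 * fill in the common struct pmu callbacks and attribute groups.
 */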
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), flags);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
	return __armpmu_alloc(GFP_KERNEL);
}

struct arm_pmu *armpmu_alloc_atomic(void)
{
	return __armpmu_alloc(GFP_ATOMIC);
}


void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

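/*
 * Register a probed PMU with the core: hook it into CPU hotplug (and CPU PM)
 * handling, then expose it to userspace via perf_pmu_register().
 */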
int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	if (!pmu->set_event_filter)
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available%s\n",
		pmu->name, pmu->num_events,
		has_nmi ? ", using NMIs" : "");

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);