#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

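/*
 * Decode a PERF_TYPE_HW_CACHE config value into a hardware event number.
 * perf packs the cache type, operation and result into bytes 0, 1 and 2 of
 * attr.config; each field indexes into the PMU driver's cache_map table.
 */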
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

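/*
 * (Re)program the hardware counter so that it overflows once the remaining
 * sample period has elapsed: the counter is written with the negated
 * remainder, capped at half of max_period to allow for interrupt latency.
 */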
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

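/*
 * Read the hardware counter and fold the delta since the last read into the
 * perf event count and the remaining sample period. The cmpxchg loop retries
 * if prev_count was updated concurrently (for instance by the overflow
 * interrupt handler).
 */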
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

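/*
 * Allocate a hardware counter for the event on the current CPU, install it
 * in the per-CPU event table and, if PERF_EF_START is set, start it
 * immediately.
 */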
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

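/*
 * Check that every event in a group can be scheduled onto this PMU at the
 * same time by dry-running counter allocation against a fake used_mask.
 */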
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

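/*
 * Common interrupt entry point: call the platform's handler if one was
 * provided via platform data, otherwise call the PMU driver's handler
 * directly, and report how long the handling took to the perf core.
 */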
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

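/*
 * pmu::event_init callback: reject events this PMU cannot handle (wrong CPU
 * class, branch stack sampling, unknown event encodings) before doing the
 * per-event setup in __hw_perf_event_init().
 */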
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu_devid(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

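/*
 * Request the PMU interrupt for a single CPU. A percpu interrupt (PPI) is
 * requested only once for all CPUs and must use the same IRQ number on each
 * of them; a regular interrupt (SPI) is requested per CPU and forced to that
 * CPU's affinity.
 */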
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);
	if (!irq)
		return 0;

	if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu_devid(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
			goto err_out;
		}
	} else {
		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		if (platdata && platdata->irq_flags) {
			irq_flags = platdata->irq_flags;
		} else {
			irq_flags = IRQF_PERCPU |
				    IRQF_NOBALANCING |
				    IRQF_NO_THREAD;
		}

		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err)
		goto err_out;

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq)) {
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
			return 0;
		}
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu_devid(irq))
		disable_percpu_irq(irq);

	return 0;
}

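/*
 * CPU power management support: PMU state is lost across low-power states,
 * so stop and save the active counters on CPU_PM_ENTER and reprogram and
 * restart them on CPU_PM_EXIT / CPU_PM_ENTER_FAILED.
 */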
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

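/*
 * Allocate an arm_pmu together with its per-CPU hw_events data and fill in
 * the generic struct pmu callbacks; the hardware-specific fields are filled
 * in by the probing code before the PMU is registered with armpmu_register().
 */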
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);