#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

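/*
 * Example (illustrative, matching the decode above): for PERF_TYPE_HW_CACHE
 * events, userspace builds the config word as
 *
 *	config = (perf cache id)         |
 *		 (perf cache op << 8)    |
 *		 (perf cache result << 16);
 *
 * so an L1-dcache read miss is
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 */
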
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

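/*
 * Illustrative sketch (hypothetical names, not part of this file): a CPU PMU
 * back-end typically wraps armpmu_map_event() in its ->map_event() callback,
 * passing its own event/cache maps and raw event mask, e.g.:
 *
 *	static int myarm_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &myarm_perf_map,
 *					&myarm_perf_cache_map, 0xFF);
 *	}
 *
 * myarm_perf_map, myarm_perf_cache_map and the 0xFF raw mask are assumptions
 * standing in for the real per-implementation tables.
 */
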
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

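/*
 * Worked example (illustrative): with a sample_period of 1000000 and no
 * pending overflow, left == 1000000, so the counter is programmed with
 * (u64)-1000000 truncated to 32 bits and will overflow (and raise its
 * interrupt) after one million events.
 */
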
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

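/*
 * Worked example (illustrative): for a 32-bit counter max_period is
 * 0xffffffff, so a wrap from prev_raw_count == 0xfffffff0 to
 * new_raw_count == 0x10 gives delta = (0x10 - 0xfffffff0) & 0xffffffff
 * = 0x20, i.e. 32 events counted across the wrap.
 */
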
static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

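/*
 * Illustrative note: validation here is purely counter-allocation based.
 * Broadly, a group containing more hardware events than the PMU has free
 * counters (e.g. one created with perf's "{...}" group syntax) fails with
 * -EINVAL because the fake used_mask runs out of indices.
 */
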
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

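/*
 * Illustrative note: an event opened with e.g. attr.exclude_user = 1 (or
 * exclude_kernel/exclude_hv/exclude_idle) requires mode exclusion. If the
 * back-end provides no set_event_filter() callback, or that callback fails,
 * __hw_perf_event_init() below rejects such an event with -EOPNOTSUPP.
 */
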
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= armpmu->attr_groups,
	};
	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;
}

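/*
 * Illustrative usage note: the common attribute group above exposes the
 * supported CPU mask through sysfs, e.g. (the exact path depends on the name
 * the back-end registers the PMU under):
 *
 *	$ cat /sys/bus/event_source/devices/<pmu-name>/cpus
 *	0-3
 */
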
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_free_irqs(struct arm_pmu *cpu_pmu)
{
	int cpu;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
		int irq = per_cpu(hw_events->irq, cpu);
		if (!irq)
			continue;

		if (irq_is_percpu(irq)) {
			free_percpu_irq(irq, &hw_events->percpu_pmu);
			break;
		}

		if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
			continue;

		free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}
}

static int cpu_pmu_request_irqs(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int cpu, err;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
		int irq = per_cpu(hw_events->irq, cpu);
		if (!irq)
			continue;

		if (irq_is_percpu(irq)) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &hw_events->percpu_pmu);
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
			}

			return err;
		}

		err = request_irq(irq, handler,
				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}

		cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
	}

	return 0;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

613 | /* |
614 | * PMU hardware loses all context when a CPU goes offline. | |
615 | * When a CPU is hotplugged back in, since some hardware registers are | |
616 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading | |
617 | * junk values out of them. | |
618 | */ | |
6e103c0c | 619 | static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) |
74cf0bc7 | 620 | { |
6e103c0c | 621 | struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); |
c09adab0 | 622 | int irq; |
74cf0bc7 | 623 | |
6e103c0c SAS |
624 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) |
625 | return 0; | |
626 | if (pmu->reset) | |
627 | pmu->reset(pmu); | |
c09adab0 MR |
628 | |
629 | irq = armpmu_get_cpu_irq(pmu, cpu); | |
630 | if (irq) { | |
631 | if (irq_is_percpu(irq)) { | |
632 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | |
633 | return 0; | |
634 | } | |
635 | ||
636 | if (irq_force_affinity(irq, cpumask_of(cpu)) && | |
637 | num_possible_cpus() > 1) { | |
638 | pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", | |
639 | irq, cpu); | |
640 | } | |
641 | } | |
642 | ||
643 | return 0; | |
644 | } | |
645 | ||
646 | static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) | |
647 | { | |
648 | struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); | |
649 | int irq; | |
650 | ||
651 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) | |
652 | return 0; | |
653 | ||
654 | irq = armpmu_get_cpu_irq(pmu, cpu); | |
655 | if (irq && irq_is_percpu(irq)) | |
656 | disable_percpu_irq(irq); | |
657 | ||
7d88eb69 | 658 | return 0; |
74cf0bc7 MR |
659 | } |
660 | ||
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpu_pmu_request_irqs(cpu_pmu, armpmu_dispatch_irq);
	if (err)
		goto out;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	cpu_pmu_free_irqs(cpu_pmu);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

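/*
 * Illustrative sketch (hypothetical names and values): platforms without
 * device-tree matching pass a table of struct pmu_probe_info entries, each
 * giving a CPUID value/mask pair and an init function, terminated by an
 * entry with a NULL init, e.g.:
 *
 *	static const struct pmu_probe_info myarm_pmu_probe_table[] = {
 *		{ .cpuid = 0x4100c070, .mask = 0xff00fff0, .init = myarm_pmu_init },
 *		{ },			<- terminating entry (init == NULL)
 *	};
 *
 * myarm_pmu_init and the CPUID value/mask shown are assumptions standing in
 * for a real driver's table.
 */
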
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
{
	int cpu, ret;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
	if (ret)
		return ret;

	for_each_cpu(cpu, &pmu->supported_cpus)
		per_cpu(hw_events->irq, cpu) = irq;

	return 0;
}

static bool pmu_has_irq_affinity(struct device_node *node)
{
	return !!of_find_property(node, "interrupt-affinity", NULL);
}

static int pmu_parse_irq_affinity(struct device_node *node, int i)
{
	struct device_node *dn;
	int cpu;

	/*
	 * If we don't have an interrupt-affinity property, we guess irq
	 * affinity matches our logical CPU order, as we used to assume.
	 * This is fragile, so we'll warn in pmu_parse_irqs().
	 */
	if (!pmu_has_irq_affinity(node))
		return i;

	dn = of_parse_phandle(node, "interrupt-affinity", i);
	if (!dn) {
		pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
			i, node->name);
		return -EINVAL;
	}

	/* Now look up the logical CPU number */
	for_each_possible_cpu(cpu) {
		struct device_node *cpu_dn;

		cpu_dn = of_cpu_device_node_get(cpu);
		of_node_put(cpu_dn);

		if (dn == cpu_dn)
			break;
	}

	if (cpu >= nr_cpu_ids) {
		pr_warn("failed to find logical CPU for %s\n", dn->name);
	}

	of_node_put(dn);

	return cpu;
}

static int pmu_parse_irqs(struct arm_pmu *pmu)
{
	int i = 0, irqs;
	struct platform_device *pdev = pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	irqs = platform_irq_count(pdev);
	if (irqs < 0) {
		pr_err("unable to count PMU IRQs\n");
		return irqs;
	}

	/*
	 * In this case we have no idea which CPUs are covered by the PMU.
	 * To match our prior behaviour, we assume all CPUs in this case.
	 */
	if (irqs == 0) {
		pr_warn("no irqs for PMU, sampling events not supported\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		cpumask_setall(&pmu->supported_cpus);
		return 0;
	}

	if (irqs == 1) {
		int irq = platform_get_irq(pdev, 0);
		if (irq && irq_is_percpu(irq))
			return pmu_parse_percpu_irq(pmu, irq);
	}

	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
		pr_warn("no interrupt-affinity property for %s, guessing.\n",
			of_node_full_name(pdev->dev.of_node));
	}

	/*
	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
	 * special platdata function that attempts to demux them.
	 */
	if (dev_get_platdata(&pdev->dev))
		cpumask_setall(&pmu->supported_cpus);

	for (i = 0; i < irqs; i++) {
		int cpu, irq;

		irq = platform_get_irq(pdev, i);
		if (WARN_ON(irq <= 0))
			continue;

		if (irq_is_percpu(irq)) {
			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
			return -EINVAL;
		}

		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
		if (cpu < 0)
			return cpu;
		if (cpu >= nr_cpu_ids)
			continue;

		if (per_cpu(hw_events->irq, cpu)) {
			pr_warn("multiple PMU IRQs for the same CPU detected\n");
			return -EINVAL;
		}

		per_cpu(hw_events->irq, cpu) = irq;
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
	}

	return 0;
}

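/*
 * Illustrative device-tree fragment (an assumption, shown only for context):
 * the "interrupt-affinity" property parsed above is a list of CPU node
 * phandles, one per PMU interrupt, e.g.:
 *
 *	pmu {
 *		compatible = "arm,cortex-a15-pmu";
 *		interrupts = <0 68 4>, <0 69 4>;
 *		interrupt-affinity = <&cpu0>, <&cpu1>;
 *	};
 */
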
static struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

static void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = armpmu_alloc();
	if (!pmu)
		return -ENOMEM;

	armpmu_init(pmu);

	pmu->plat_device = pdev;

	ret = pmu_parse_irqs(pmu);
	if (ret)
		goto out_free;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = init_fn(pmu);
	} else if (probe_table) {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	armpmu_free(pmu);
	return ret;
}

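/*
 * Illustrative sketch (hypothetical names): a CPU PMU back-end driver calls
 * arm_pmu_device_probe() from its platform driver ->probe(), supplying a DT
 * match table of init functions and, optionally, a CPUID probe table:
 *
 *	static const struct of_device_id myarm_pmu_of_ids[] = {
 *		{ .compatible = "vendor,myarm-pmu", .data = myarm_pmu_init },
 *		{ },
 *	};
 *
 *	static int myarm_pmu_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, myarm_pmu_of_ids,
 *					    myarm_pmu_probe_table);
 *	}
 *
 * "vendor,myarm-pmu", myarm_pmu_init and myarm_pmu_probe_table are
 * assumptions standing in for a real driver's tables.
 */
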
static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);