1 | /* |
2 | * PMU support | |
3 | * | |
4 | * Copyright (C) 2012 ARM Limited | |
5 | * Author: Will Deacon <will.deacon@arm.com> | |
6 | * | |
7 | * This code is based heavily on the ARMv7 perf event code. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | #define pr_fmt(fmt) "hw perfevents: " fmt | |
22 | ||
23 | #include <linux/bitmap.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/irq.h> | |
26 | #include <linux/kernel.h> |
27 | #include <linux/export.h> | |
28 | #include <linux/of.h> | |
29 | #include <linux/perf_event.h> |
30 | #include <linux/platform_device.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/spinlock.h> |
33 | #include <linux/uaccess.h> | |
34 | ||
35 | #include <asm/cputype.h> | |
36 | #include <asm/irq.h> | |
37 | #include <asm/irq_regs.h> | |
38 | #include <asm/pmu.h> | |
39 | #include <asm/stacktrace.h> | |
40 | ||
41 | /* | |
42 | * ARMv8 supports a maximum of 32 events. | |
43 | * The cycle counter is included in this total. | |
44 | */ | |
45 | #define ARMPMU_MAX_HWEVENTS 32 | |
46 | ||
47 | static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); | |
48 | static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); | |
49 | static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); | |
50 | ||
51 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) | |
52 | ||
53 | /* Set at runtime when we know what CPU type we are. */ | |
54 | static struct arm_pmu *cpu_pmu; | |
55 | ||
56 | int | |
57 | armpmu_get_max_events(void) | |
58 | { | |
59 | int max_events = 0; | |
60 | ||
61 | if (cpu_pmu != NULL) | |
62 | max_events = cpu_pmu->num_events; | |
63 | ||
64 | return max_events; | |
65 | } | |
66 | EXPORT_SYMBOL_GPL(armpmu_get_max_events); | |
67 | ||
68 | int perf_num_counters(void) | |
69 | { | |
70 | return armpmu_get_max_events(); | |
71 | } | |
72 | EXPORT_SYMBOL_GPL(perf_num_counters); | |
73 | ||
74 | #define HW_OP_UNSUPPORTED 0xFFFF | |
75 | ||
76 | #define C(_x) \ | |
77 | PERF_COUNT_HW_CACHE_##_x | |
78 | ||
79 | #define CACHE_OP_UNSUPPORTED 0xFFFF | |
80 | ||
81 | static int | |
82 | armpmu_map_cache_event(const unsigned (*cache_map) | |
83 | [PERF_COUNT_HW_CACHE_MAX] | |
84 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
85 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | |
86 | u64 config) | |
87 | { | |
88 | unsigned int cache_type, cache_op, cache_result, ret; | |
89 | ||
90 | cache_type = (config >> 0) & 0xff; | |
91 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | |
92 | return -EINVAL; | |
93 | ||
94 | cache_op = (config >> 8) & 0xff; | |
95 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | |
96 | return -EINVAL; | |
97 | ||
98 | cache_result = (config >> 16) & 0xff; | |
99 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | |
100 | return -EINVAL; | |
101 | ||
102 | ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; | |
103 | ||
104 | if (ret == CACHE_OP_UNSUPPORTED) | |
105 | return -ENOENT; | |
106 | ||
107 | return ret; | |
108 | } | |
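/*
 * For reference, the generic cache event encoding decoded above packs three
 * fields into attr.config (illustrative example, matching the shifts used in
 * armpmu_map_cache_event):
 *
 *   config = C(L1D)                  (0x00, bits  7:0, cache type)
 *          | C(OP_READ)     << 8     (0x00, bits 15:8, operation)
 *          | C(RESULT_MISS) << 16    (0x01, bits 23:16, result)
 *
 * which selects cache_map[L1D][OP_READ][RESULT_MISS], i.e.
 * ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL in the armv8_pmuv3_perf_cache_map
 * table below.
 */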
109 | ||
110 | static int | |
111 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | |
112 | { | |
113 | int mapping; |
114 | ||
115 | if (config >= PERF_COUNT_HW_MAX) | |
116 | return -EINVAL; | |
117 | ||
118 | mapping = (*event_map)[config]; | |
119 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
120 | } | |
121 | ||
122 | static int | |
123 | armpmu_map_raw_event(u32 raw_event_mask, u64 config) | |
124 | { | |
125 | return (int)(config & raw_event_mask); | |
126 | } | |
127 | ||
128 | static int map_cpu_event(struct perf_event *event, | |
129 | const unsigned (*event_map)[PERF_COUNT_HW_MAX], | |
130 | const unsigned (*cache_map) | |
131 | [PERF_COUNT_HW_CACHE_MAX] | |
132 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
133 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | |
134 | u32 raw_event_mask) | |
135 | { | |
136 | u64 config = event->attr.config; | |
137 | ||
138 | switch (event->attr.type) { | |
139 | case PERF_TYPE_HARDWARE: | |
140 | return armpmu_map_event(event_map, config); | |
141 | case PERF_TYPE_HW_CACHE: | |
142 | return armpmu_map_cache_event(cache_map, config); | |
143 | case PERF_TYPE_RAW: | |
144 | return armpmu_map_raw_event(raw_event_mask, config); | |
145 | } | |
146 | ||
147 | return -ENOENT; | |
148 | } | |
149 | ||
150 | int | |
151 | armpmu_event_set_period(struct perf_event *event, | |
152 | struct hw_perf_event *hwc, | |
153 | int idx) | |
154 | { | |
155 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
156 | s64 left = local64_read(&hwc->period_left); | |
157 | s64 period = hwc->sample_period; | |
158 | int ret = 0; | |
159 | ||
160 | if (unlikely(left <= -period)) { | |
161 | left = period; | |
162 | local64_set(&hwc->period_left, left); | |
163 | hwc->last_period = period; | |
164 | ret = 1; | |
165 | } | |
166 | ||
167 | if (unlikely(left <= 0)) { | |
168 | left += period; | |
169 | local64_set(&hwc->period_left, left); | |
170 | hwc->last_period = period; | |
171 | ret = 1; | |
172 | } | |
173 | ||
174 | /* |
175 | * Limit the maximum period to prevent the counter value | |
176 | * from overtaking the one we are about to program. In | |
177 | * effect we are reducing max_period to account for | |
178 | * interrupt latency (and we are being very conservative). | |
179 | */ | |
180 | if (left > (armpmu->max_period >> 1)) | |
181 | left = armpmu->max_period >> 1; | |
182 | |
183 | local64_set(&hwc->prev_count, (u64)-left); | |
184 | ||
185 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | |
186 | ||
187 | perf_event_update_userpage(event); | |
188 | ||
189 | return ret; | |
190 | } | |
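/*
 * Worked example (illustrative): with 32-bit counters and left == 1000, the
 * counter is programmed to (u64)-1000 & 0xffffffff == 0xfffffc18, so it
 * overflows - and raises the PMU interrupt - after exactly 1000 increments
 * of the event being counted.
 */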
191 | ||
192 | u64 | |
193 | armpmu_event_update(struct perf_event *event, | |
194 | struct hw_perf_event *hwc, | |
195 | int idx) | |
196 | { | |
197 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
198 | u64 delta, prev_raw_count, new_raw_count; | |
199 | ||
200 | again: | |
201 | prev_raw_count = local64_read(&hwc->prev_count); | |
202 | new_raw_count = armpmu->read_counter(idx); | |
203 | ||
204 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | |
205 | new_raw_count) != prev_raw_count) | |
206 | goto again; | |
207 | ||
208 | delta = (new_raw_count - prev_raw_count) & armpmu->max_period; | |
209 | ||
210 | local64_add(delta, &event->count); | |
211 | local64_sub(delta, &hwc->period_left); | |
212 | ||
213 | return new_raw_count; | |
214 | } | |
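/*
 * The masked subtraction above makes the delta immune to a single counter
 * wrap. Illustrative example with max_period == 0xffffffff:
 * prev == 0xfffffff0 and new == 0x10 gives
 * (0x10 - 0xfffffff0) & 0xffffffff == 0x20, i.e. 32 events.
 */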
215 | ||
216 | static void | |
217 | armpmu_read(struct perf_event *event) | |
218 | { | |
219 | struct hw_perf_event *hwc = &event->hw; | |
220 | ||
221 | /* Don't read disabled counters! */ | |
222 | if (hwc->idx < 0) | |
223 | return; | |
224 | ||
225 | armpmu_event_update(event, hwc, hwc->idx); | |
226 | } | |
227 | ||
228 | static void | |
229 | armpmu_stop(struct perf_event *event, int flags) | |
230 | { | |
231 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
232 | struct hw_perf_event *hwc = &event->hw; | |
233 | ||
234 | /* | |
235 | * ARM pmu always has to update the counter, so ignore | |
236 | * PERF_EF_UPDATE, see comments in armpmu_start(). | |
237 | */ | |
238 | if (!(hwc->state & PERF_HES_STOPPED)) { | |
239 | armpmu->disable(hwc, hwc->idx); | |
240 | barrier(); /* why? */ | |
241 | armpmu_event_update(event, hwc, hwc->idx); | |
242 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
243 | } | |
244 | } | |
245 | ||
246 | static void | |
247 | armpmu_start(struct perf_event *event, int flags) | |
248 | { | |
249 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
250 | struct hw_perf_event *hwc = &event->hw; | |
251 | ||
252 | /* | |
253 | * ARM pmu always has to reprogram the period, so ignore | |
254 | * PERF_EF_RELOAD, see the comment below. | |
255 | */ | |
256 | if (flags & PERF_EF_RELOAD) | |
257 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | |
258 | ||
259 | hwc->state = 0; | |
260 | /* | |
261 | * Set the period again. Some counters can't be stopped, so when we | |
262 | * were stopped we simply disabled the IRQ source and the counter | |
263 | * may have been left counting. If we don't do this step then we may | |
264 | * get an interrupt too soon or *way* too late if the overflow has | |
265 | * happened since disabling. | |
266 | */ | |
267 | armpmu_event_set_period(event, hwc, hwc->idx); | |
268 | armpmu->enable(hwc, hwc->idx); | |
269 | } | |
270 | ||
271 | static void | |
272 | armpmu_del(struct perf_event *event, int flags) | |
273 | { | |
274 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
275 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | |
276 | struct hw_perf_event *hwc = &event->hw; | |
277 | int idx = hwc->idx; | |
278 | ||
279 | WARN_ON(idx < 0); | |
280 | ||
281 | armpmu_stop(event, PERF_EF_UPDATE); | |
282 | hw_events->events[idx] = NULL; | |
283 | clear_bit(idx, hw_events->used_mask); | |
284 | ||
285 | perf_event_update_userpage(event); | |
286 | } | |
287 | ||
288 | static int | |
289 | armpmu_add(struct perf_event *event, int flags) | |
290 | { | |
291 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
292 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | |
293 | struct hw_perf_event *hwc = &event->hw; | |
294 | int idx; | |
295 | int err = 0; | |
296 | ||
297 | perf_pmu_disable(event->pmu); | |
298 | ||
299 | /* If we don't have a space for the counter then finish early. */ | |
300 | idx = armpmu->get_event_idx(hw_events, hwc); | |
301 | if (idx < 0) { | |
302 | err = idx; | |
303 | goto out; | |
304 | } | |
305 | ||
306 | /* | |
307 | * If there is an event in the counter we are going to use then make | |
308 | * sure it is disabled. | |
309 | */ | |
310 | event->hw.idx = idx; | |
311 | armpmu->disable(hwc, idx); | |
312 | hw_events->events[idx] = event; | |
313 | ||
314 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
315 | if (flags & PERF_EF_START) | |
316 | armpmu_start(event, PERF_EF_RELOAD); | |
317 | ||
318 | /* Propagate our changes to the userspace mapping. */ | |
319 | perf_event_update_userpage(event); | |
320 | ||
321 | out: | |
322 | perf_pmu_enable(event->pmu); | |
323 | return err; | |
324 | } | |
325 | ||
326 | static int | |
327 | validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, |
328 | struct perf_event *event) | |
329 | { | |
330 | struct arm_pmu *armpmu; | |
331 | struct hw_perf_event fake_event = event->hw; |
332 | struct pmu *leader_pmu = event->group_leader->pmu; | |
333 | ||
334 | if (is_software_event(event)) |
335 | return 1; | |
336 | ||
337 | /* |
338 | * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The | |
339 | * core perf code won't check that the pmu->ctx == leader->ctx | |
340 | * until after pmu->event_init(event). | |
341 | */ | |
342 | if (event->pmu != pmu) | |
343 | return 0; | |
344 | ||
345 | if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) |
346 | return 1; | |
347 | ||
348 | if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) | |
349 | return 1; |
350 | ||
351 | armpmu = to_arm_pmu(event->pmu); | |
352 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; |
353 | } | |
354 | ||
355 | static int | |
356 | validate_group(struct perf_event *event) | |
357 | { | |
358 | struct perf_event *sibling, *leader = event->group_leader; | |
359 | struct pmu_hw_events fake_pmu; | |
360 | DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); | |
361 | ||
362 | /* | |
363 | * Initialise the fake PMU. We only need to populate the | |
364 | * used_mask for the purposes of validation. | |
365 | */ | |
366 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); | |
367 | fake_pmu.used_mask = fake_used_mask; | |
368 | ||
369 | if (!validate_event(event->pmu, &fake_pmu, leader)) | |
370 | return -EINVAL; |
371 | ||
372 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | |
373 | if (!validate_event(event->pmu, &fake_pmu, sibling)) | |
374 | return -EINVAL; |
375 | } | |
376 | ||
377 | if (!validate_event(event->pmu, &fake_pmu, event)) | |
378 | return -EINVAL; |
379 | ||
380 | return 0; | |
381 | } | |
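/*
 * In other words, group validation is a dry run of counter allocation: every
 * hardware event in the group (leader, siblings and the new event) must fit
 * on the PMU at the same time, using a scratch used_mask so the real per-CPU
 * state is left untouched.
 */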
382 | ||
383 | static void |
384 | armpmu_disable_percpu_irq(void *data) | |
385 | { | |
386 | unsigned int irq = *(unsigned int *)data; | |
387 | disable_percpu_irq(irq); | |
388 | } | |
389 | ||
390 | static void |
391 | armpmu_release_hardware(struct arm_pmu *armpmu) | |
392 | { | |
393 | int irq; |
394 | unsigned int i, irqs; | |
395 | struct platform_device *pmu_device = armpmu->plat_device; |
396 | ||
397 | irqs = min(pmu_device->num_resources, num_possible_cpus()); | |
398 | if (!irqs) |
399 | return; | |
400 | ||
401 | irq = platform_get_irq(pmu_device, 0); |
402 | if (irq <= 0) | |
403 | return; | |
404 | ||
405 | if (irq_is_percpu(irq)) { | |
406 | on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); | |
407 | free_percpu_irq(irq, &cpu_hw_events); | |
408 | } else { | |
409 | for (i = 0; i < irqs; ++i) { | |
410 | int cpu = i; |
411 | ||
412 | if (armpmu->irq_affinity) | |
413 | cpu = armpmu->irq_affinity[i]; | |
414 | ||
415 | if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) | |
416 | continue; |
417 | irq = platform_get_irq(pmu_device, i); | |
418 | if (irq > 0) | |
419 | free_irq(irq, armpmu); | |
420 | } | |
421 | } |
422 | } | |
423 | ||
424 | static void |
425 | armpmu_enable_percpu_irq(void *data) | |
426 | { | |
427 | unsigned int irq = *(unsigned int *)data; | |
428 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | |
429 | } | |
430 | ||
431 | static int |
432 | armpmu_reserve_hardware(struct arm_pmu *armpmu) | |
433 | { | |
434 | int err, irq; |
435 | unsigned int i, irqs; | |
436 | struct platform_device *pmu_device = armpmu->plat_device; |
437 | ||
438 | if (!pmu_device) { | |
439 | pr_err("no PMU device registered\n"); | |
440 | return -ENODEV; | |
441 | } | |
442 | ||
443 | irqs = min(pmu_device->num_resources, num_possible_cpus()); | |
444 | if (!irqs) { | |
445 | pr_err("no irqs for PMUs defined\n"); |
446 | return -ENODEV; | |
447 | } | |
448 | ||
449 | irq = platform_get_irq(pmu_device, 0); |
450 | if (irq <= 0) { | |
451 | pr_err("failed to get valid irq for PMU device\n"); | |
452 | return -ENODEV; | |
453 | } | |
454 | ||
455 | if (irq_is_percpu(irq)) { | |
456 | err = request_percpu_irq(irq, armpmu->handle_irq, | |
457 | "arm-pmu", &cpu_hw_events); | |
458 | ||
459 | if (err) { | |
460 | pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", |
461 | irq); | |
462 | armpmu_release_hardware(armpmu); |
463 | return err; | |
464 | } | |
465 | ||
466 | on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); |
467 | } else { | |
468 | for (i = 0; i < irqs; ++i) { | |
469 | int cpu = i; |
470 | ||
471 | err = 0; |
472 | irq = platform_get_irq(pmu_device, i); | |
473 | if (irq <= 0) | |
474 | continue; | |
475 | ||
476 | if (armpmu->irq_affinity) |
477 | cpu = armpmu->irq_affinity[i]; | |
478 | ||
479 | /* |
480 | * If we have a single PMU interrupt that we can't shift, | |
481 | * assume that we're running on a uniprocessor machine and | |
482 | * continue. Otherwise, continue without this interrupt. | |
483 | */ | |
484 | if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { | |
485 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | |
486 | irq, cpu); | |
487 | continue; |
488 | } | |
489 | ||
490 | err = request_irq(irq, armpmu->handle_irq, | |
491 | IRQF_NOBALANCING, | |
492 | "arm-pmu", armpmu); | |
493 | if (err) { | |
494 | pr_err("unable to request IRQ%d for ARM PMU counters\n", | |
495 | irq); | |
496 | armpmu_release_hardware(armpmu); | |
497 | return err; | |
498 | } | |
499 | ||
500 | cpumask_set_cpu(cpu, &armpmu->active_irqs); | |
501 | } | |
502 | } |
503 | ||
504 | return 0; | |
505 | } | |
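/*
 * Note the two IRQ models handled above: a single percpu interrupt (PPI) is
 * requested once and then enabled on every CPU, whereas per-CPU SPIs are
 * requested individually and pinned to their CPU with irq_set_affinity()
 * (optionally remapped via irq_affinity[]).
 */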
506 | ||
507 | static void | |
508 | hw_perf_event_destroy(struct perf_event *event) | |
509 | { | |
510 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
511 | atomic_t *active_events = &armpmu->active_events; | |
512 | struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; | |
513 | ||
514 | if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { | |
515 | armpmu_release_hardware(armpmu); | |
516 | mutex_unlock(pmu_reserve_mutex); | |
517 | } | |
518 | } | |
519 | ||
520 | static int | |
521 | event_requires_mode_exclusion(struct perf_event_attr *attr) | |
522 | { | |
523 | return attr->exclude_idle || attr->exclude_user || | |
524 | attr->exclude_kernel || attr->exclude_hv; | |
525 | } | |
526 | ||
527 | static int | |
528 | __hw_perf_event_init(struct perf_event *event) | |
529 | { | |
530 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
531 | struct hw_perf_event *hwc = &event->hw; | |
532 | int mapping, err; | |
533 | ||
534 | mapping = armpmu->map_event(event); | |
535 | ||
536 | if (mapping < 0) { | |
537 | pr_debug("event %x:%llx not supported\n", event->attr.type, | |
538 | event->attr.config); | |
539 | return mapping; | |
540 | } | |
541 | ||
542 | /* | |
543 | * We don't assign an index until we actually place the event onto | |
544 | * hardware. Use -1 to signify that we haven't decided where to put it | |
545 | * yet. For SMP systems, each core has its own PMU so we can't do any | |
546 | * clever allocation or constraints checking at this point. | |
547 | */ | |
548 | hwc->idx = -1; | |
549 | hwc->config_base = 0; | |
550 | hwc->config = 0; | |
551 | hwc->event_base = 0; | |
552 | ||
553 | /* | |
554 | * Check whether we need to exclude the counter from certain modes. | |
555 | */ | |
556 | if ((!armpmu->set_event_filter || | |
557 | armpmu->set_event_filter(hwc, &event->attr)) && | |
558 | event_requires_mode_exclusion(&event->attr)) { | |
559 | pr_debug("ARM performance counters do not support mode exclusion\n"); | |
560 | return -EPERM; | |
561 | } | |
562 | ||
563 | /* | |
564 | * Store the event encoding into the config_base field. | |
565 | */ | |
566 | hwc->config_base |= (unsigned long)mapping; | |
567 | ||
568 | if (!hwc->sample_period) { | |
569 | /* | |
570 | * For non-sampling runs, limit the sample_period to half | |
571 | * of the counter width. That way, the new counter value | |
572 | * is far less likely to overtake the previous one unless | |
573 | * you have some serious IRQ latency issues. | |
574 | */ | |
575 | hwc->sample_period = armpmu->max_period >> 1; | |
576 | hwc->last_period = hwc->sample_period; | |
577 | local64_set(&hwc->period_left, hwc->sample_period); | |
578 | } | |
579 | ||
580 | err = 0; | |
581 | if (event->group_leader != event) { | |
582 | err = validate_group(event); | |
583 | if (err) | |
584 | return -EINVAL; | |
585 | } | |
586 | ||
587 | return err; | |
588 | } | |
589 | ||
590 | static int armpmu_event_init(struct perf_event *event) | |
591 | { | |
592 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | |
593 | int err = 0; | |
594 | atomic_t *active_events = &armpmu->active_events; | |
595 | ||
596 | if (armpmu->map_event(event) == -ENOENT) | |
597 | return -ENOENT; | |
598 | ||
599 | event->destroy = hw_perf_event_destroy; | |
600 | ||
601 | if (!atomic_inc_not_zero(active_events)) { | |
602 | mutex_lock(&armpmu->reserve_mutex); | |
603 | if (atomic_read(active_events) == 0) | |
604 | err = armpmu_reserve_hardware(armpmu); | |
605 | ||
606 | if (!err) | |
607 | atomic_inc(active_events); | |
608 | mutex_unlock(&armpmu->reserve_mutex); | |
609 | } | |
610 | ||
611 | if (err) | |
612 | return err; | |
613 | ||
614 | err = __hw_perf_event_init(event); | |
615 | if (err) | |
616 | hw_perf_event_destroy(event); | |
617 | ||
618 | return err; | |
619 | } | |
620 | ||
621 | static void armpmu_enable(struct pmu *pmu) | |
622 | { | |
623 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | |
624 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | |
625 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); | |
626 | ||
627 | if (enabled) | |
628 | armpmu->start(); | |
629 | } | |
630 | ||
631 | static void armpmu_disable(struct pmu *pmu) | |
632 | { | |
633 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | |
634 | armpmu->stop(); | |
635 | } | |
636 | ||
637 | static void __init armpmu_init(struct arm_pmu *armpmu) | |
638 | { | |
639 | atomic_set(&armpmu->active_events, 0); | |
640 | mutex_init(&armpmu->reserve_mutex); | |
641 | ||
642 | armpmu->pmu = (struct pmu) { | |
643 | .pmu_enable = armpmu_enable, | |
644 | .pmu_disable = armpmu_disable, | |
645 | .event_init = armpmu_event_init, | |
646 | .add = armpmu_add, | |
647 | .del = armpmu_del, | |
648 | .start = armpmu_start, | |
649 | .stop = armpmu_stop, | |
650 | .read = armpmu_read, | |
651 | }; | |
652 | } | |
653 | ||
654 | int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) | |
655 | { | |
656 | armpmu_init(armpmu); | |
657 | return perf_pmu_register(&armpmu->pmu, name, type); | |
658 | } | |
659 | ||
660 | /* | |
661 | * ARMv8 PMUv3 Performance Events handling code. | |
662 | * Common event types. | |
663 | */ | |
664 | enum armv8_pmuv3_perf_types { | |
665 | /* Required events. */ | |
666 | ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00, | |
667 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03, | |
668 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04, | |
669 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | |
670 | ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11, | |
671 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12, | |
672 | ||
673 | /* At least one of the following is required. */ | |
674 | ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08, | |
675 | ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B, | |
676 | ||
677 | /* Common architectural events. */ | |
678 | ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06, | |
679 | ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07, | |
680 | ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09, | |
681 | ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A, | |
682 | ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B, | |
683 | ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C, | |
684 | ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D, | |
685 | ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E, | |
686 | ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, | |
687 | ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C, | |
688 | ||
689 | /* Common microarchitectural events. */ | |
690 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01, | |
691 | ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02, | |
692 | ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05, | |
693 | ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13, | |
694 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14, | |
695 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15, | |
696 | ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16, | |
697 | ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17, | |
698 | ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18, | |
699 | ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, | |
700 | ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, | |
701 | ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, | |
702 | }; |
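/*
 * These are the architectural event numbers that end up in
 * PMXEVTYPER_EL0.evtCount. They can also be requested directly as raw
 * events; for example (illustrative):
 *
 *   perf stat -e r11 ...
 *
 * goes through the PERF_TYPE_RAW path, is masked with ARMV8_EVTYPE_EVENT and
 * programs event 0x11 (ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES).
 */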
703 | ||
704 | /* PMUv3 HW events mapping. */ | |
705 | static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { | |
706 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, | |
707 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, |
708 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | |
709 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | |
710 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED, | |
711 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | |
712 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | |
713 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | |
714 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | |
715 | }; | |
716 | ||
717 | static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
718 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
719 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
720 | [C(L1D)] = { | |
721 | [C(OP_READ)] = { | |
722 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | |
723 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | |
724 | }, | |
725 | [C(OP_WRITE)] = { | |
726 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | |
727 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | |
728 | }, | |
729 | [C(OP_PREFETCH)] = { | |
730 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
731 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
732 | }, | |
733 | }, | |
734 | [C(L1I)] = { | |
735 | [C(OP_READ)] = { | |
736 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
737 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
738 | }, | |
739 | [C(OP_WRITE)] = { | |
740 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
741 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
742 | }, | |
743 | [C(OP_PREFETCH)] = { | |
744 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
745 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
746 | }, | |
747 | }, | |
748 | [C(LL)] = { | |
749 | [C(OP_READ)] = { | |
750 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
751 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
752 | }, | |
753 | [C(OP_WRITE)] = { | |
754 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
755 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
756 | }, | |
757 | [C(OP_PREFETCH)] = { | |
758 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
759 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
760 | }, | |
761 | }, | |
762 | [C(DTLB)] = { | |
763 | [C(OP_READ)] = { | |
764 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
765 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
766 | }, | |
767 | [C(OP_WRITE)] = { | |
768 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
769 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
770 | }, | |
771 | [C(OP_PREFETCH)] = { | |
772 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
773 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
774 | }, | |
775 | }, | |
776 | [C(ITLB)] = { | |
777 | [C(OP_READ)] = { | |
778 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
779 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
780 | }, | |
781 | [C(OP_WRITE)] = { | |
782 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
783 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
784 | }, | |
785 | [C(OP_PREFETCH)] = { | |
786 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
787 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
788 | }, | |
789 | }, | |
790 | [C(BPU)] = { | |
791 | [C(OP_READ)] = { | |
792 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, | |
793 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | |
794 | }, | |
795 | [C(OP_WRITE)] = { | |
796 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, | |
797 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | |
798 | }, | |
799 | [C(OP_PREFETCH)] = { | |
800 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
801 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
802 | }, | |
803 | }, | |
804 | [C(NODE)] = { | |
805 | [C(OP_READ)] = { | |
806 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
807 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
808 | }, | |
809 | [C(OP_WRITE)] = { | |
810 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
811 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
812 | }, | |
813 | [C(OP_PREFETCH)] = { | |
814 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
815 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
816 | }, | |
817 | }, | |
818 | }; | |
819 | ||
820 | /* | |
821 | * Perf Events' indices | |
822 | */ | |
823 | #define ARMV8_IDX_CYCLE_COUNTER 0 | |
824 | #define ARMV8_IDX_COUNTER0 1 | |
825 | #define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | |
826 | ||
827 | #define ARMV8_MAX_COUNTERS 32 | |
828 | #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) | |
829 | ||
830 | /* | |
831 | * ARMv8 low level PMU access | |
832 | */ | |
833 | ||
834 | /* | |
835 | * Perf Event to low level counters mapping | |
836 | */ | |
837 | #define ARMV8_IDX_TO_COUNTER(x) \ | |
838 | (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) | |
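/*
 * Example (illustrative): perf index ARMV8_IDX_COUNTER0 (1) maps to hardware
 * event counter 0 in PMSELR_EL0, while ARMV8_IDX_CYCLE_COUNTER (0) never goes
 * through this macro - the cycle counter is accessed directly via PMCCNTR_EL0.
 */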
839 | ||
840 | /* | |
841 | * Per-CPU PMCR: config reg | |
842 | */ | |
843 | #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ | |
844 | #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ | |
845 | #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ | |
846 | #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | |
847 | #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ | |
848 | #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ | |
849 | #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ | |
850 | #define ARMV8_PMCR_N_MASK 0x1f | |
851 | #define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */ | |
852 | ||
853 | /* | |
854 | * PMOVSR: counters overflow flag status reg | |
855 | */ | |
856 | #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ | |
857 | #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK | |
858 | ||
859 | /* | |
860 | * PMXEVTYPER: Event selection reg | |
861 | */ | |
862 | #define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ |
863 | #define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ | |
864 | |
865 | /* | |
866 | * Event filters for PMUv3 | |
867 | */ | |
868 | #define ARMV8_EXCLUDE_EL1 (1 << 31) | |
869 | #define ARMV8_EXCLUDE_EL0 (1 << 30) | |
870 | #define ARMV8_INCLUDE_EL2 (1 << 27) | |
871 | ||
872 | static inline u32 armv8pmu_pmcr_read(void) | |
873 | { | |
874 | u32 val; | |
875 | asm volatile("mrs %0, pmcr_el0" : "=r" (val)); | |
876 | return val; | |
877 | } | |
878 | ||
879 | static inline void armv8pmu_pmcr_write(u32 val) | |
880 | { | |
881 | val &= ARMV8_PMCR_MASK; | |
882 | isb(); | |
883 | asm volatile("msr pmcr_el0, %0" :: "r" (val)); | |
884 | } | |
885 | ||
886 | static inline int armv8pmu_has_overflowed(u32 pmovsr) | |
887 | { | |
888 | return pmovsr & ARMV8_OVERFLOWED_MASK; | |
889 | } | |
890 | ||
891 | static inline int armv8pmu_counter_valid(int idx) | |
892 | { | |
893 | return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; | |
894 | } | |
895 | ||
896 | static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) | |
897 | { | |
898 | int ret = 0; | |
899 | u32 counter; | |
900 | ||
901 | if (!armv8pmu_counter_valid(idx)) { | |
902 | pr_err("CPU%u checking wrong counter %d overflow status\n", | |
903 | smp_processor_id(), idx); | |
904 | } else { | |
905 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
906 | ret = pmnc & BIT(counter); | |
907 | } | |
908 | ||
909 | return ret; | |
910 | } | |
911 | ||
912 | static inline int armv8pmu_select_counter(int idx) | |
913 | { | |
914 | u32 counter; | |
915 | ||
916 | if (!armv8pmu_counter_valid(idx)) { | |
917 | pr_err("CPU%u selecting wrong PMNC counter %d\n", | |
918 | smp_processor_id(), idx); | |
919 | return -EINVAL; | |
920 | } | |
921 | ||
922 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
923 | asm volatile("msr pmselr_el0, %0" :: "r" (counter)); | |
924 | isb(); | |
925 | ||
926 | return idx; | |
927 | } | |
928 | ||
929 | static inline u32 armv8pmu_read_counter(int idx) | |
930 | { | |
931 | u32 value = 0; | |
932 | ||
933 | if (!armv8pmu_counter_valid(idx)) | |
934 | pr_err("CPU%u reading wrong counter %d\n", | |
935 | smp_processor_id(), idx); | |
936 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) | |
937 | asm volatile("mrs %0, pmccntr_el0" : "=r" (value)); | |
938 | else if (armv8pmu_select_counter(idx) == idx) | |
939 | asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value)); | |
940 | ||
941 | return value; | |
942 | } | |
943 | ||
944 | static inline void armv8pmu_write_counter(int idx, u32 value) | |
945 | { | |
946 | if (!armv8pmu_counter_valid(idx)) | |
947 | pr_err("CPU%u writing wrong counter %d\n", | |
948 | smp_processor_id(), idx); | |
949 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) | |
950 | asm volatile("msr pmccntr_el0, %0" :: "r" (value)); | |
951 | else if (armv8pmu_select_counter(idx) == idx) | |
952 | asm volatile("msr pmxevcntr_el0, %0" :: "r" (value)); | |
953 | } | |
954 | ||
955 | static inline void armv8pmu_write_evtype(int idx, u32 val) | |
956 | { | |
957 | if (armv8pmu_select_counter(idx) == idx) { | |
958 | val &= ARMV8_EVTYPE_MASK; | |
959 | asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); | |
960 | } | |
961 | } | |
962 | ||
963 | static inline int armv8pmu_enable_counter(int idx) | |
964 | { | |
965 | u32 counter; | |
966 | ||
967 | if (!armv8pmu_counter_valid(idx)) { | |
968 | pr_err("CPU%u enabling wrong PMNC counter %d\n", | |
969 | smp_processor_id(), idx); | |
970 | return -EINVAL; | |
971 | } | |
972 | ||
973 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
974 | asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); | |
975 | return idx; | |
976 | } | |
977 | ||
978 | static inline int armv8pmu_disable_counter(int idx) | |
979 | { | |
980 | u32 counter; | |
981 | ||
982 | if (!armv8pmu_counter_valid(idx)) { | |
983 | pr_err("CPU%u disabling wrong PMNC counter %d\n", | |
984 | smp_processor_id(), idx); | |
985 | return -EINVAL; | |
986 | } | |
987 | ||
988 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
989 | asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter))); | |
990 | return idx; | |
991 | } | |
992 | ||
993 | static inline int armv8pmu_enable_intens(int idx) | |
994 | { | |
995 | u32 counter; | |
996 | ||
997 | if (!armv8pmu_counter_valid(idx)) { | |
998 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | |
999 | smp_processor_id(), idx); | |
1000 | return -EINVAL; | |
1001 | } | |
1002 | ||
1003 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
1004 | asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); | |
1005 | return idx; | |
1006 | } | |
1007 | ||
1008 | static inline int armv8pmu_disable_intens(int idx) | |
1009 | { | |
1010 | u32 counter; | |
1011 | ||
1012 | if (!armv8pmu_counter_valid(idx)) { | |
1013 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | |
1014 | smp_processor_id(), idx); | |
1015 | return -EINVAL; | |
1016 | } | |
1017 | ||
1018 | counter = ARMV8_IDX_TO_COUNTER(idx); | |
1019 | asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); | |
1020 | isb(); | |
1021 | /* Clear the overflow flag in case an interrupt is pending. */ | |
1022 | asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); | |
1023 | isb(); | |
1024 | return idx; | |
1025 | } | |
1026 | ||
1027 | static inline u32 armv8pmu_getreset_flags(void) | |
1028 | { | |
1029 | u32 value; | |
1030 | ||
1031 | /* Read */ | |
1032 | asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); | |
1033 | ||
1034 | /* Write to clear flags */ | |
1035 | value &= ARMV8_OVSR_MASK; | |
1036 | asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); | |
1037 | ||
1038 | return value; | |
1039 | } | |
1040 | ||
1041 | static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) | |
1042 | { | |
1043 | unsigned long flags; | |
1044 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | |
1045 | ||
1046 | /* | |
1047 | * Enable counter and interrupt, and set the counter to count | |
1048 | * the event that we're interested in. | |
1049 | */ | |
1050 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
1051 | ||
1052 | /* | |
1053 | * Disable counter | |
1054 | */ | |
1055 | armv8pmu_disable_counter(idx); | |
1056 | ||
1057 | /* | |
1058 | * Set event (if destined for PMNx counters). | |
1059 | */ | |
1060 | armv8pmu_write_evtype(idx, hwc->config_base); | |
1061 | ||
1062 | /* | |
1063 | * Enable interrupt for this counter | |
1064 | */ | |
1065 | armv8pmu_enable_intens(idx); | |
1066 | ||
1067 | /* | |
1068 | * Enable counter | |
1069 | */ | |
1070 | armv8pmu_enable_counter(idx); | |
1071 | ||
1072 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
1073 | } | |
1074 | ||
1075 | static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) | |
1076 | { | |
1077 | unsigned long flags; | |
1078 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | |
1079 | ||
1080 | /* | |
1081 | * Disable counter and interrupt | |
1082 | */ | |
1083 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
1084 | ||
1085 | /* | |
1086 | * Disable counter | |
1087 | */ | |
1088 | armv8pmu_disable_counter(idx); | |
1089 | ||
1090 | /* | |
1091 | * Disable interrupt for this counter | |
1092 | */ | |
1093 | armv8pmu_disable_intens(idx); | |
1094 | ||
1095 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
1096 | } | |
1097 | ||
1098 | static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) | |
1099 | { | |
1100 | u32 pmovsr; | |
1101 | struct perf_sample_data data; | |
1102 | struct pmu_hw_events *cpuc; | |
1103 | struct pt_regs *regs; | |
1104 | int idx; | |
1105 | ||
1106 | /* | |
1107 | * Get and reset the IRQ flags | |
1108 | */ | |
1109 | pmovsr = armv8pmu_getreset_flags(); | |
1110 | ||
1111 | /* | |
1112 | * Did an overflow occur? | |
1113 | */ | |
1114 | if (!armv8pmu_has_overflowed(pmovsr)) | |
1115 | return IRQ_NONE; | |
1116 | ||
1117 | /* | |
1118 | * Handle the counter(s) overflow(s) | |
1119 | */ | |
1120 | regs = get_irq_regs(); | |
1121 | ||
1122 | cpuc = this_cpu_ptr(&cpu_hw_events); | |
1123 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1124 | struct perf_event *event = cpuc->events[idx]; | |
1125 | struct hw_perf_event *hwc; | |
1126 | ||
1127 | /* Ignore if we don't have an event. */ | |
1128 | if (!event) | |
1129 | continue; | |
1130 | ||
1131 | /* | |
1132 | * We have a single interrupt for all counters. Check that | |
1133 | * each counter has overflowed before we process it. | |
1134 | */ | |
1135 | if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) | |
1136 | continue; | |
1137 | ||
1138 | hwc = &event->hw; | |
1139 | armpmu_event_update(event, hwc, idx); | |
1140 | perf_sample_data_init(&data, 0, hwc->last_period); | |
1141 | if (!armpmu_event_set_period(event, hwc, idx)) | |
1142 | continue; | |
1143 | ||
1144 | if (perf_event_overflow(event, &data, regs)) | |
1145 | cpu_pmu->disable(hwc, idx); | |
1146 | } | |
1147 | ||
1148 | /* | |
1149 | * Handle the pending perf events. | |
1150 | * | |
1151 | * Note: this call *must* be run with interrupts disabled. For | |
1152 | * platforms that can have the PMU interrupts raised as an NMI, this | |
1153 | * will not work. | |
1154 | */ | |
1155 | irq_work_run(); | |
1156 | ||
1157 | return IRQ_HANDLED; | |
1158 | } | |
1159 | ||
1160 | static void armv8pmu_start(void) | |
1161 | { | |
1162 | unsigned long flags; | |
1163 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | |
1164 | ||
1165 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
1166 | /* Enable all counters */ | |
1167 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); | |
1168 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
1169 | } | |
1170 | ||
1171 | static void armv8pmu_stop(void) | |
1172 | { | |
1173 | unsigned long flags; | |
1174 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | |
1175 | ||
1176 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
1177 | /* Disable all counters */ | |
1178 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); | |
1179 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
1180 | } | |
1181 | ||
1182 | static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, | |
1183 | struct hw_perf_event *event) | |
1184 | { | |
1185 | int idx; | |
1186 | unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; | |
1187 | ||
1188 | /* Always place a cycle counter into the cycle counter. */ | |
1189 | if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { | |
1190 | if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) |
1191 | return -EAGAIN; | |
1192 | ||
1193 | return ARMV8_IDX_CYCLE_COUNTER; | |
1194 | } | |
1195 | ||
1196 | /* | |
1197 | * For anything other than a cycle counter, try and use | |
1198 | * the events counters | |
1199 | */ | |
1200 | for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { | |
1201 | if (!test_and_set_bit(idx, cpuc->used_mask)) | |
1202 | return idx; | |
1203 | } | |
1204 | ||
1205 | /* The counters are all in use. */ | |
1206 | return -EAGAIN; | |
1207 | } | |
1208 | ||
1209 | /* | |
1210 | * Add an event filter to a given event. This will only work for PMUv2 PMUs. | |
1211 | */ | |
1212 | static int armv8pmu_set_event_filter(struct hw_perf_event *event, | |
1213 | struct perf_event_attr *attr) | |
1214 | { | |
1215 | unsigned long config_base = 0; | |
1216 | ||
1217 | if (attr->exclude_idle) | |
1218 | return -EPERM; | |
1219 | if (attr->exclude_user) | |
1220 | config_base |= ARMV8_EXCLUDE_EL0; | |
1221 | if (attr->exclude_kernel) | |
1222 | config_base |= ARMV8_EXCLUDE_EL1; | |
1223 | if (!attr->exclude_hv) | |
1224 | config_base |= ARMV8_INCLUDE_EL2; | |
1225 | ||
1226 | /* | |
1227 | * Install the filter into config_base as this is used to | |
1228 | * construct the event type. | |
1229 | */ | |
1230 | event->config_base = config_base; | |
1231 | ||
1232 | return 0; | |
1233 | } | |
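/*
 * Example (illustrative): attr.exclude_kernel = 1 with exclude_user,
 * exclude_hv and exclude_idle left at 0 yields
 * config_base = ARMV8_EXCLUDE_EL1 | ARMV8_INCLUDE_EL2, so the counter
 * ignores EL1 but still counts EL0 (and EL2 where applicable).
 */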
1234 | ||
1235 | static void armv8pmu_reset(void *info) | |
1236 | { | |
1237 | u32 idx, nb_cnt = cpu_pmu->num_events; | |
1238 | ||
1239 | /* The counter and interrupt enable registers are unknown at reset. */ | |
1240 | for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) | |
1241 | armv8pmu_disable_event(NULL, idx); | |
1242 | ||
1243 | /* Initialize & Reset PMNC: C and P bits. */ | |
1244 | armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); | |
1245 | ||
1246 | /* Disable access from userspace. */ | |
1247 | asm volatile("msr pmuserenr_el0, %0" :: "r" (0)); | |
1248 | } | |
1249 | ||
1250 | static int armv8_pmuv3_map_event(struct perf_event *event) | |
1251 | { | |
1252 | return map_cpu_event(event, &armv8_pmuv3_perf_map, | |
1253 | &armv8_pmuv3_perf_cache_map, |
1254 | ARMV8_EVTYPE_EVENT); | |
1255 | } |
1256 | ||
1257 | static struct arm_pmu armv8pmu = { | |
1258 | .handle_irq = armv8pmu_handle_irq, | |
1259 | .enable = armv8pmu_enable_event, | |
1260 | .disable = armv8pmu_disable_event, | |
1261 | .read_counter = armv8pmu_read_counter, | |
1262 | .write_counter = armv8pmu_write_counter, | |
1263 | .get_event_idx = armv8pmu_get_event_idx, | |
1264 | .start = armv8pmu_start, | |
1265 | .stop = armv8pmu_stop, | |
1266 | .reset = armv8pmu_reset, | |
1267 | .max_period = (1LLU << 32) - 1, | |
1268 | }; | |
1269 | ||
1270 | static u32 __init armv8pmu_read_num_pmnc_events(void) | |
1271 | { | |
1272 | u32 nb_cnt; | |
1273 | ||
1274 | /* Read the nb of CNTx counters supported from PMNC */ | |
1275 | nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; | |
1276 | ||
1277 | /* Add the CPU cycles counter and return */ | |
1278 | return nb_cnt + 1; | |
1279 | } | |
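/*
 * Example (illustrative): a core reporting PMCR_EL0.N == 6 has six
 * programmable event counters, so this returns 7 once the dedicated cycle
 * counter is added.
 */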
1280 | ||
1281 | static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) | |
1282 | { | |
1283 | armv8pmu.name = "arm/armv8-pmuv3"; | |
1284 | armv8pmu.map_event = armv8_pmuv3_map_event; | |
1285 | armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); | |
1286 | armv8pmu.set_event_filter = armv8pmu_set_event_filter; | |
1287 | return &armv8pmu; | |
1288 | } | |
1289 | ||
1290 | /* | |
1291 | * Ensure the PMU has sane values out of reset. | |
1292 | * This requires SMP to be available, so exists as a separate initcall. | |
1293 | */ | |
1294 | static int __init | |
1295 | cpu_pmu_reset(void) | |
1296 | { | |
1297 | if (cpu_pmu && cpu_pmu->reset) | |
1298 | return on_each_cpu(cpu_pmu->reset, NULL, 1); | |
1299 | return 0; | |
1300 | } | |
1301 | arch_initcall(cpu_pmu_reset); | |
1302 | ||
1303 | /* | |
1304 | * PMU platform driver and devicetree bindings. | |
1305 | */ | |
1306 | static const struct of_device_id armpmu_of_device_ids[] = { | |
1307 | {.compatible = "arm,armv8-pmuv3"}, |
1308 | {}, | |
1309 | }; | |
1310 | ||
1311 | static int armpmu_device_probe(struct platform_device *pdev) | |
1312 | { | |
1313 | int i, irq, *irqs; | |
1314 | ||
1315 | if (!cpu_pmu) |
1316 | return -ENODEV; | |
1317 | ||
1318 | irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); |
1319 | if (!irqs) | |
1320 | return -ENOMEM; | |
1321 | ||
1322 | /* Don't bother with PPIs; they're already affine */ |
1323 | irq = platform_get_irq(pdev, 0); | |
1324 | if (irq >= 0 && irq_is_percpu(irq)) | |
1325 | return 0; | |
1326 | ||
1327 | for (i = 0; i < pdev->num_resources; ++i) { |
1328 | struct device_node *dn; | |
1329 | int cpu; | |
1330 | ||
1331 | dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", | |
1332 | i); | |
1333 | if (!dn) { | |
1334 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", | |
1335 | of_node_full_name(pdev->dev.of_node), i); | |
1336 | break; |
1337 | } | |
1338 | ||
1339 | for_each_possible_cpu(cpu) | |
1340 | if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) | |
1341 | break; | |
1342 | ||
1343 | of_node_put(dn); | |
1344 | if (cpu >= nr_cpu_ids) { | |
1345 | pr_warn("Failed to find logical CPU for %s\n", | |
1346 | dn->name); | |
1347 | break; | |
1348 | } | |
1349 | ||
1350 | irqs[i] = cpu; | |
1351 | } | |
1352 | ||
1353 | if (i == pdev->num_resources) | |
1354 | cpu_pmu->irq_affinity = irqs; | |
1355 | else | |
1356 | kfree(irqs); | |
1357 | ||
1358 | cpu_pmu->plat_device = pdev; |
1359 | return 0; | |
1360 | } | |
1361 | ||
1362 | static struct platform_driver armpmu_driver = { | |
1363 | .driver = { | |
1364 | .name = "arm-pmu", | |
1365 | .of_match_table = armpmu_of_device_ids, | |
1366 | }, | |
1367 | .probe = armpmu_device_probe, | |
1368 | }; | |
1369 | ||
1370 | static int __init register_pmu_driver(void) | |
1371 | { | |
1372 | return platform_driver_register(&armpmu_driver); | |
1373 | } | |
1374 | device_initcall(register_pmu_driver); | |
1375 | ||
1376 | static struct pmu_hw_events *armpmu_get_cpu_events(void) | |
1377 | { | |
1378 | return this_cpu_ptr(&cpu_hw_events); | |
1379 | } |
1380 | ||
1381 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) | |
1382 | { | |
1383 | int cpu; | |
1384 | for_each_possible_cpu(cpu) { | |
1385 | struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); | |
1386 | events->events = per_cpu(hw_events, cpu); | |
1387 | events->used_mask = per_cpu(used_mask, cpu); | |
1388 | raw_spin_lock_init(&events->pmu_lock); | |
1389 | } | |
1390 | armpmu->get_hw_events = armpmu_get_cpu_events; | |
1391 | } | |
1392 | ||
1393 | static int __init init_hw_perf_events(void) | |
1394 | { | |
1395 | u64 dfr = read_cpuid(ID_AA64DFR0_EL1); | |
1396 | ||
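	/*
	 * ID_AA64DFR0_EL1 bits [11:8] hold the PMUVer field; the value 0x1
	 * indicates an ARMv8 PMUv3 implementation, which is the only flavour
	 * this driver knows how to drive.
	 */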
1397 | switch ((dfr >> 8) & 0xf) { | |
1398 | case 0x1: /* PMUv3 */ | |
1399 | cpu_pmu = armv8_pmuv3_pmu_init(); | |
1400 | break; | |
1401 | } | |
1402 | ||
1403 | if (cpu_pmu) { | |
1404 | pr_info("enabled with %s PMU driver, %d counters available\n", | |
1405 | cpu_pmu->name, cpu_pmu->num_events); | |
1406 | cpu_pmu_init(cpu_pmu); | |
1407 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); | |
1408 | } else { | |
1409 | pr_info("no hardware support available\n"); | |
1410 | } | |
1411 | ||
1412 | return 0; | |
1413 | } | |
1414 | early_initcall(init_hw_perf_events); | |
1415 | ||
1416 | /* | |
1417 | * Callchain handling code. | |
1418 | */ | |
1419 | struct frame_tail { | |
1420 | struct frame_tail __user *fp; |
1421 | unsigned long lr; | |
1422 | } __attribute__((packed)); |
1423 | ||
1424 | /* | |
1425 | * Get the return address for a single stackframe and return a pointer to the | |
1426 | * next frame tail. | |
1427 | */ | |
1428 | static struct frame_tail __user * | |
1429 | user_backtrace(struct frame_tail __user *tail, | |
1430 | struct perf_callchain_entry *entry) | |
1431 | { | |
1432 | struct frame_tail buftail; | |
1433 | unsigned long err; | |
1434 | ||
1435 | /* Also check accessibility of one struct frame_tail beyond */ | |
1436 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | |
1437 | return NULL; | |
1438 | ||
1439 | pagefault_disable(); | |
1440 | err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); | |
1441 | pagefault_enable(); | |
1442 | ||
1443 | if (err) | |
1444 | return NULL; | |
1445 | ||
1446 | perf_callchain_store(entry, buftail.lr); | |
1447 | ||
1448 | /* | |
1449 | * Frame pointers should strictly progress back up the stack | |
1450 | * (towards higher addresses). | |
1451 | */ | |
1452 | if (tail >= buftail.fp) | |
1453 | return NULL; | |
1454 | ||
1455 | return buftail.fp; | |
1456 | } | |
1457 | ||
1458 | #ifdef CONFIG_COMPAT | |
1459 | /* |
1460 | * The registers we're interested in are at the end of the variable | |
1461 | * length saved register structure. The fp points at the end of this | |
1462 | * structure so the address of this struct is: | |
1463 | * (struct compat_frame_tail *)(xxx->fp)-1 | |
1464 | * | |
1465 | * This code has been adapted from the ARM OProfile support. | |
1466 | */ | |
1467 | struct compat_frame_tail { | |
1468 | compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */ | |
1469 | u32 sp; | |
1470 | u32 lr; | |
1471 | } __attribute__((packed)); | |
1472 | ||
1473 | static struct compat_frame_tail __user * | |
1474 | compat_user_backtrace(struct compat_frame_tail __user *tail, | |
1475 | struct perf_callchain_entry *entry) | |
1476 | { | |
1477 | struct compat_frame_tail buftail; | |
1478 | unsigned long err; | |
1479 | ||
1480 | /* Also check accessibility of one struct frame_tail beyond */ | |
1481 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | |
1482 | return NULL; | |
1483 | ||
1484 | pagefault_disable(); | |
1485 | err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); | |
1486 | pagefault_enable(); | |
1487 | ||
1488 | if (err) | |
1489 | return NULL; | |
1490 | ||
1491 | perf_callchain_store(entry, buftail.lr); | |
1492 | ||
1493 | /* | |
1494 | * Frame pointers should strictly progress back up the stack | |
1495 | * (towards higher addresses). | |
1496 | */ | |
1497 | if (tail + 1 >= (struct compat_frame_tail __user *) | |
1498 | compat_ptr(buftail.fp)) | |
1499 | return NULL; | |
1500 | ||
1501 | return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1; | |
1502 | } | |
1503 | #endif /* CONFIG_COMPAT */ | |
1504 | ||
1505 | void perf_callchain_user(struct perf_callchain_entry *entry, |
1506 | struct pt_regs *regs) | |
1507 | { | |
1508 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { |
1509 | /* We don't support guest os callchain now */ | |
1510 | return; | |
1511 | } | |
1512 | ||
1513 | perf_callchain_store(entry, regs->pc); | |
1514 | ||
1515 | if (!compat_user_mode(regs)) { |
1516 | /* AARCH64 mode */ | |
1517 | struct frame_tail __user *tail; | |
1518 | ||
1519 | tail = (struct frame_tail __user *)regs->regs[29]; | |
1520 | ||
1521 | while (entry->nr < PERF_MAX_STACK_DEPTH && | |
1522 | tail && !((unsigned long)tail & 0xf)) | |
1523 | tail = user_backtrace(tail, entry); | |
1524 | } else { | |
1525 | #ifdef CONFIG_COMPAT | |
1526 | /* AARCH32 compat mode */ |
1527 | struct compat_frame_tail __user *tail; | |
1528 | ||
1529 | tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; | |
1530 | ||
1531 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && | |
1532 | tail && !((unsigned long)tail & 0x3)) | |
1533 | tail = compat_user_backtrace(tail, entry); | |
1534 | #endif | |
1535 | } | |
1536 | } |
1537 | ||
1538 | /* | |
1539 | * Gets called by walk_stackframe() for every stackframe. This will be called | |
1540 | * whilst unwinding the stackframe and is like a subroutine return so we use | |
1541 | * the PC. | |
1542 | */ | |
1543 | static int callchain_trace(struct stackframe *frame, void *data) | |
1544 | { | |
1545 | struct perf_callchain_entry *entry = data; | |
1546 | perf_callchain_store(entry, frame->pc); | |
1547 | return 0; | |
1548 | } | |
1549 | ||
1550 | void perf_callchain_kernel(struct perf_callchain_entry *entry, | |
1551 | struct pt_regs *regs) | |
1552 | { | |
1553 | struct stackframe frame; | |
1554 | ||
1555 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { |
1556 | /* We don't support guest os callchain now */ | |
1557 | return; | |
1558 | } | |
1559 | ||
1560 | frame.fp = regs->regs[29]; |
1561 | frame.sp = regs->sp; | |
1562 | frame.pc = regs->pc; | |
1563 | ||
1564 | walk_stackframe(&frame, callchain_trace, entry); |
1565 | } | |
1566 | |
1567 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | |
1568 | { | |
1569 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | |
1570 | return perf_guest_cbs->get_guest_ip(); | |
1571 | ||
1572 | return instruction_pointer(regs); | |
1573 | } | |
1574 | ||
1575 | unsigned long perf_misc_flags(struct pt_regs *regs) | |
1576 | { | |
1577 | int misc = 0; | |
1578 | ||
1579 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | |
1580 | if (perf_guest_cbs->is_user_mode()) | |
1581 | misc |= PERF_RECORD_MISC_GUEST_USER; | |
1582 | else | |
1583 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | |
1584 | } else { | |
1585 | if (user_mode(regs)) | |
1586 | misc |= PERF_RECORD_MISC_USER; | |
1587 | else | |
1588 | misc |= PERF_RECORD_MISC_KERNEL; | |
1589 | } | |
1590 | ||
1591 | return misc; | |
1592 | } |