#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

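	/*
	 * If the remaining period has been used up (or overshot by more than
	 * a whole period), start the next sampling period here and return 1
	 * so the caller knows the period was reset.
	 */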
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

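	/*
	 * Update prev_count with a cmpxchg loop: if another context (e.g. the
	 * overflow interrupt) updates the event concurrently, retry so that
	 * the delta is always computed against a consistent prev_count.
	 */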
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);
	if (!irq)
		return 0;

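	/*
	 * A per-CPU interrupt (PPI) is requested only once, by the first CPU
	 * that gets here; any other CPU must report the same PPI. Normal
	 * interrupts (SPIs) are requested individually per CPU, with
	 * balancing disabled so they stay affine to that CPU.
	 */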
	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
		}
	} else {
		err = request_irq(irq, handler,
				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err) {
		pr_err("unable to request IRQ%d for ARM PMU counters\n",
		       irq);
		return err;
	}

	cpumask_set_cpu(cpu, &armpmu->active_irqs);

	return 0;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu(irq)) {
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
			return 0;
		}

		if (irq_force_affinity(irq, cpumask_of(cpu)) &&
		    num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
		}
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
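		/* fall through: restart the PMU, as after a failed PM entry */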
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);