/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

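/*
 * Map a PERF_TYPE_HW_CACHE config onto a hardware event number. The
 * config value packs three fields, one per byte:
 *
 *	config = cache_type | (cache_op << 8) | (cache_result << 16)
 *
 * so, for example, an L1D read miss is PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */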
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

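/*
 * Resolve an event's attr into a hardware event number via the per-PMU
 * map tables. Events whose type matches the PMU's own dynamic type (or
 * PERF_TYPE_RAW) bypass the tables and are simply masked against
 * raw_event_mask.
 */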
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

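/*
 * Program the counter for the next sample period. The counter is
 * written with -left so that it overflows (and raises an interrupt)
 * after "left" more events; returns nonzero when a new period was
 * started.
 */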
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

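/*
 * Fold the counter delta since the last read into event->count. The
 * cmpxchg loop guards against a racing interrupt handler updating
 * prev_count underneath us; the delta is masked to the counter width
 * (max_period).
 */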
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

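/*
 * pmu::del callback: stop the event and release its counter so that it
 * can be reused by a subsequent armpmu_add().
 */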
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

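/*
 * pmu::add callback: claim a hardware counter for the event on this
 * CPU and optionally start counting straight away.
 */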
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

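/*
 * Check whether a single event could be scheduled against a (possibly
 * fake) set of hardware events. Software events always validate;
 * events that are off (and will stay off) are ignored.
 */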
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	irqreturn_t ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

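/*
 * Common part of event initialisation: map the event onto a hardware
 * event number, apply any mode-exclusion filtering, pick a default
 * period for non-sampling events and validate the group.
 */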
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();

	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

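/*
 * Release the IRQ taken for a CPU. A per-cpu interrupt (PPI) is shared
 * by every CPU in the PMU's mask and is freed exactly once; a regular
 * per-CPU interrupt is freed individually.
 */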
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!irq)
		return 0;

	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
		}
	} else {
		err = request_irq(irq, handler,
				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err) {
		pr_err("unable to request IRQ%d for ARM PMU counters\n",
		       irq);
		return err;
	}

	cpumask_set_cpu(cpu, &armpmu->active_irqs);

	return 0;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu(irq)) {
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
			return 0;
		}

		if (irq_force_affinity(irq, cpumask_of(cpu)) &&
		    num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
		}
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}

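/*
 * CPU PM notifiers: PMU state is lost across low-power states, so stop
 * (and save) all active counters on CPU_PM_ENTER and reprogram them on
 * the way back out.
 */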
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			 /*
			  * Restore and enable the counter.
			  * armpmu_start() indirectly calls
			  *
			  * perf_event_update_userpage()
			  *
			  * that requires RCU read locking to be functional,
			  * wrap the call within RCU_NONIDLE to make the
			  * RCU subsystem aware this cpu is not idle from
			  * an RCU perspective for the armpmu_start() call
			  * duration.
			  */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* fall through */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

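/*
 * Allocate an arm_pmu with the common perf callbacks and attributes
 * wired up. A CPU PMU driver would typically do something like the
 * following (a sketch; error handling omitted, and probe_fn stands in
 * for the driver's own hypothetical init callback that fills in name,
 * handle_irq, map_event and friends):
 *
 *	struct arm_pmu *pmu = armpmu_alloc();
 *
 *	probe_fn(pmu);
 *	armpmu_request_irqs(pmu);
 *	armpmu_register(pmu);
 */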
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

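/*
 * Register the multi-instance hotplug state shared by all ARM PMU
 * drivers. Runs at subsys_initcall time so the state is ready before
 * any PMU driver probes and calls cpu_pmu_init().
 */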
static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);