/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

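/*
 * Map a generic perf cache event onto a hardware event number. The
 * PERF_TYPE_HW_CACHE config value packs three fields into one word:
 * bits 0-7 select the cache (e.g. PERF_COUNT_HW_CACHE_L1D), bits 8-15
 * the operation (e.g. PERF_COUNT_HW_CACHE_OP_READ) and bits 16-23 the
 * result (e.g. PERF_COUNT_HW_CACHE_RESULT_MISS), so an L1D read-miss
 * event is encoded as:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * Each PMU driver supplies a cache_map table indexed by these three
 * fields; entries the hardware cannot count are marked
 * CACHE_OP_UNSUPPORTED.
 */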
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

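/*
 * Program the counter so that it overflows after "left" more events:
 * the PMU counts up and interrupts on overflow, so writing the two's
 * complement of the remaining period arms the next sample. For example,
 * with a 32-bit counter and left == 1000, the counter is written with
 * 0xfffffc18 and overflows after exactly 1000 increments.
 */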
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

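/*
 * Fold the delta since the last read into the 64-bit event count. The
 * cmpxchg loop makes the read-modify-update of prev_count safe against
 * a concurrent update from the overflow interrupt: if prev_count
 * changed under us, retry with fresh values. Masking the delta with
 * max_period handles counters narrower than 64 bits wrapping between
 * reads.
 */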
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

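/*
 * A concrete failure case for the validation above: grouping five
 * hardware events on a PMU with four counters leaves no free index for
 * the fifth, so get_event_idx() fails against the fake PMU's used_mask
 * and the group is rejected with -EINVAL before it is ever scheduled.
 */
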
static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

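/*
 * Common IRQ entry point for all ARM PMU drivers. Platform data may
 * interpose a handle_irq wrapper around the PMU's own handler;
 * otherwise the PMU's handler runs directly. The time spent in the
 * handler is fed to perf_sample_event_took() so the core can throttle
 * the sample rate if overflow handling becomes too expensive.
 */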
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

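/*
 * Events can ask to be excluded while the CPU is idle or running at
 * particular privilege levels (user, kernel, hyp). A PMU driver that
 * implements set_event_filter() handles these bits itself; for any
 * other driver we must refuse the event rather than silently
 * mis-count.
 */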
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();

	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

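/*
 * Expose the CPUs this PMU instance covers through sysfs. On a
 * big.LITTLE system with two PMU instances, userspace can discover
 * which CPUs each instance monitors with something like:
 *
 *	$ cat /sys/bus/event_source/devices/armv8_pmuv3_0/cpus
 *	0-3
 *
 * (the PMU name shown here is illustrative; it depends on the platform)
 */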
static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

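/*
 * PMU interrupts come in two flavours: a single percpu interrupt (PPI)
 * shared by all supported CPUs, or one ordinary interrupt (SPI) per
 * CPU. A PPI is requested and freed exactly once, on the first and
 * last CPU respectively, while SPIs are requested per CPU with their
 * affinity forced to that CPU.
 */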
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!irq)
		return 0;

	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
		}
	} else {
		err = request_irq(irq, handler,
				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err) {
		pr_err("unable to request IRQ%d for ARM PMU counters\n",
		       irq);
		return err;
	}

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu(irq)) {
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
			return 0;
		}

		if (irq_force_affinity(irq, cpumask_of(cpu)) &&
		    num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
		}
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}

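/*
 * CPU PM (cpuidle/suspend) support: the PMU loses its state when the
 * core powers down, so in-flight events are stopped and saved on
 * CPU_PM_ENTER and reprogrammed on CPU_PM_EXIT, mirroring what the
 * hotplug callbacks above do for offline/online transitions.
 */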
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 *
			 * armpmu_start() indirectly calls
			 * perf_event_update_userpage(), which requires RCU
			 * read locking to be functional. Wrap the call within
			 * RCU_NONIDLE to make the RCU subsystem aware that
			 * this CPU is not idle from an RCU perspective for
			 * the duration of the armpmu_start() call.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* fall through */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

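/*
 * Hook a PMU into the CPU hotplug and CPU PM machinery;
 * cpu_pmu_destroy() tears both registrations down again.
 */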
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

*cpu_pmu
)
778 cpu_pm_pmu_unregister(cpu_pmu
);
779 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING
,
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

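/*
 * Typical driver usage, sketched (the names below are illustrative;
 * real drivers wire these callbacks up from their DT/ACPI probe code):
 *
 *	struct arm_pmu *pmu = armpmu_alloc();
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *	pmu->name = "my_pmu";			(hypothetical)
 *	pmu->handle_irq = my_pmu_handle_irq;	(hypothetical)
 *	pmu->enable = my_pmu_enable_event;	(hypothetical)
 *	...
 *	ret = armpmu_register(pmu);
 *	if (ret)
 *		armpmu_free(pmu);
 *
 * armpmu_register() hooks the PMU into CPU hotplug and CPU PM, then
 * registers it with the perf core.
 */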
int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

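/*
 * Register the multi-instance hotplug state once at boot; individual
 * PMUs attach to it later via cpuhp_state_add_instance() in
 * cpu_pmu_init().
 */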
static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);