/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
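/*
 * PERF_TYPE_HW_CACHE events arrive with the generic perf cache encoding
 * packed into attr.config:
 *
 *   config[ 7: 0] - cache type   (e.g. L1D, L1I, LL, DTLB, ITLB, BPU)
 *   config[15: 8] - cache op     (READ, WRITE, PREFETCH)
 *   config[23:16] - cache result (ACCESS, MISS)
 *
 * armpmu_map_cache_event() unpacks these fields and looks the PMU-specific
 * event number up in the driver-supplied cache_map table.
 */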
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
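/*
 * Program the counter so that it overflows after "left" more increments:
 * the hardware counter is written with -left (truncated to 32 bits) and
 * prev_count mirrors that value so that armpmu_event_update() can later
 * work out how many events actually occurred.
 */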
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
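/*
 * Fold the current hardware count into the perf event. The cmpxchg retry
 * loop copes with racing against the overflow interrupt handler updating
 * prev_count, and masking the delta with max_period handles the counter
 * wrapping around.
 */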
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}
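/*
 * Group validation: schedule the whole group onto a throw-away set of
 * counters (fake_pmu) to check that the leader, its siblings and the new
 * event can all fit on the hardware at the same time.
 */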
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}
/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};
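/*
 * The "cpus" attribute is picked up via armpmu->attr_groups and is typically
 * exposed as /sys/bus/event_source/devices/<pmu>/cpus, letting userspace
 * discover which CPUs this PMU instance covers.
 */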
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= armpmu->attr_groups,
	};
	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;
}
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}
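/*
 * PMU interrupts come in two flavours: a single percpu interrupt (PPI)
 * shared by all CPUs, which must be enabled/disabled on each CPU via the
 * cross-calls above, or one normal interrupt (SPI) per CPU, which is
 * requested individually and routed with irq_set_affinity().
 */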
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}

		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);
	return 0;
}
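/*
 * CPU PM notifiers (CONFIG_CPU_PM): counters lose their contents when a core
 * powers down for idle, so events are stopped and saved on CPU_PM_ENTER and
 * reprogrammed again on CPU_PM_EXIT / CPU_PM_ENTER_FAILED.
 */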
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * which requires RCU read locking to be functional;
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this CPU is not idle from
			 * an RCU perspective for the duration of the
			 * armpmu_start() call.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}
static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* fall through: restart the PMU after reprogramming */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
*cpu_pmu
)
815 cpu_pmu
->cpu_pm_nb
.notifier_call
= cpu_pm_pmu_notify
;
816 return cpu_pm_register_notifier(&cpu_pmu
->cpu_pm_nb
);
819 static void cpu_pm_pmu_unregister(struct arm_pmu
*cpu_pmu
)
821 cpu_pm_unregister_notifier(&cpu_pmu
->cpu_pm_nb
);
824 static inline int cpu_pm_pmu_register(struct arm_pmu
*cpu_pmu
) { return 0; }
825 static inline void cpu_pm_pmu_unregister(struct arm_pmu
*cpu_pmu
) { }
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					       &cpu_pmu->node);
	if (err)
		goto out_free;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events	= cpu_hw_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out_free:
	free_percpu(cpu_hw_events);
	return err;
}
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
	free_percpu(cpu_pmu->hw_events);
}
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq > 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
					dn->name);
				of_node_put(dn);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources)
				break;

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, try the interrupt affinity */
	if (cpumask_weight(&pmu->supported_cpus) == 0) {
		int irq = platform_get_irq(pdev, 0);

		if (irq > 0 && irq_is_percpu(irq)) {
			/* If using PPIs, check the affinity of the partition */
			int ret;

			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
			if (ret) {
				kfree(irqs);
				return ret;
			}
		} else {
			/* Otherwise default to all CPUs */
			cpumask_setall(&pmu->supported_cpus);
		}
	}

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else if (probe_table) {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	kfree(pmu->irq_affinity);
	kfree(pmu);
	return ret;
}
static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu, NULL);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);