/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This code is based on the sh pmu code:
 *  Copyright (C) 2009 Paul Mundt
 *
 * and on the arm pmu code:
 *  Copyright (C) 2009 picoChip Designs, Ltd., James Iles
 *  Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqchip/metag.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include <asm/core_reg.h>
#include <asm/processor.h>

#include "perf_event.h"

static int _hw_perf_event_init(struct perf_event *);
static void _hw_perf_event_destroy(struct perf_event *);

/* Determines which core type we are */
static struct metag_pmu *metag_pmu __read_mostly;

/* Processor specific data */
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

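/*
 * Each cpu_hw_events instance tracks the events installed on this CPU's
 * counters (events[]), which counters are busy (used_mask) and a lock
 * serialising access to the counter registers (pmu_lock); see "perf_event.h".
 */
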
const char *perf_pmu_name(void)
{
	if (metag_pmu)
		return metag_pmu->pmu.name;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (metag_pmu)
		return metag_pmu->max_events;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

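/*
 * The two helpers above override the weak definitions in the core perf
 * code, so generic code (e.g. the oprofile bridge) can query the active
 * PMU's name and the number of counters it exposes.
 */
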
static inline int metag_pmu_initialised(void)
{
	return !!metag_pmu;
}

static void release_pmu_hardware(void)
{
	int irq;
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* Early cores don't have overflow interrupts */
	if (version < 0x0104)
		return;

	irq = internal_irq_map(17);
	if (irq >= 0)
		free_irq(irq, (void *)1);

	irq = internal_irq_map(16);
	if (irq >= 0)
		free_irq(irq, (void *)0);
}

static int reserve_pmu_hardware(void)
{
	int err, irq[2];
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* Early cores don't have overflow interrupts */
	if (version < 0x0104)
		return 0;

	/*
	 * Bit 16 on HWSTATMETA is the interrupt for performance counter 0;
	 * similarly, 17 is the interrupt for performance counter 1.
	 * We can't (yet) interrupt on the cycle counter, because it's a
	 * register, however it holds a 32-bit value as opposed to 24-bit.
	 */
	irq[0] = internal_irq_map(16);
	if (irq[0] < 0) {
		pr_err("unable to map internal IRQ %d\n", 16);
		return -ENODEV;
	}
	err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu0", (void *)0);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[0]);
		return err;
	}

	irq[1] = internal_irq_map(17);
	if (irq[1] < 0) {
		pr_err("unable to map internal IRQ %d\n", 17);
		err = -ENODEV;
		goto out_irq0;
	}
	err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu1", (void *)1);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[1]);
		goto out_irq0;
	}

	return 0;

out_irq0:
	free_irq(irq[0], (void *)0);
	return err;
}

static void metag_pmu_enable(struct pmu *pmu)
{
}

static void metag_pmu_disable(struct pmu *pmu)
{
}

static int metag_pmu_event_init(struct perf_event *event)
{
	int err = 0;
	atomic_t *active_events = &metag_pmu->active_events;

	if (!metag_pmu_initialised()) {
		err = -ENODEV;
		goto out;
	}

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	event->destroy = _hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&metag_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = reserve_pmu_hardware();

		if (!err)
			atomic_inc(active_events);

		mutex_unlock(&metag_pmu->reserve_mutex);
	}

	/* Hardware and cache counters */
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		err = _hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (err)
		event->destroy(event);

out:
	return err;
}

void metag_pmu_event_update(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

	/*
	 * If this counter is chained, it may be that the previous counter
	 * value has been changed beneath us.
	 *
	 * To get around this, we read and exchange the new raw count, then
	 * add the delta (new - prev) to the generic counter atomically.
	 *
	 * Without interrupts, this is the simplest approach.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = metag_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Calculate the delta and add it to the counter.
	 */
	delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;

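	/*
	 * Masking with MAX_PERIOD keeps the delta within the hardware
	 * counter's width, so a counter that wrapped between the two reads
	 * still yields the correct (small) difference.
	 */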
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

int metag_pmu_event_set_period(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed */
	if (unlikely(period != hwc->last_period))
		left += period - hwc->last_period;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)metag_pmu->max_period)
		left = metag_pmu->max_period;

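	/*
	 * Program the counter with the two's complement of 'left': it then
	 * overflows after 'left' more events. prev_count is set to match so
	 * that metag_pmu_event_update() computes the correct delta.
	 */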
	if (metag_pmu->write) {
		local64_set(&hwc->prev_count, -(s32)left);
		metag_pmu->write(idx, -left & MAX_PERIOD);
	}

	perf_event_update_userpage(event);

	return ret;
}

static void metag_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	/*
	 * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/*
	 * Some counters can't be stopped (i.e. are core global), so when the
	 * counter was 'stopped' we merely disabled the IRQ. If we don't reset
	 * the period, then we'll either: a) get an overflow too soon;
	 * or b) too late if the overflow happened since disabling.
	 * Obviously, this has little bearing on cores without the overflow
	 * interrupt, as the performance counter resets to zero on write
	 * anyway.
	 */
	if (metag_pmu->max_period)
		metag_pmu_event_set_period(event, hwc, hwc->idx);
	cpuc->events[idx] = event;
	metag_pmu->enable(hwc, idx);
}

static void metag_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * We should always update the counter on stop; see comment above.
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		metag_pmu_event_update(event, hwc, hwc->idx);
		metag_pmu->disable(hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int metag_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = 0, ret = 0;

	perf_pmu_disable(event->pmu);

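	/*
	 * A config of 0x100 is the PERF_COUNT_HW_INSTRUCTIONS encoding (see
	 * metag_general_events[] below): it always uses the dedicated
	 * METAG_INST_COUNTER slot, while every other event competes for the
	 * remaining counters via used_mask.
	 */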
	/* check whether we're counting instructions */
	if (hwc->config == 0x100) {
		if (__test_and_set_bit(METAG_INST_COUNTER,
				cpuc->used_mask)) {
			ret = -EAGAIN;
			goto out;
		}
		idx = METAG_INST_COUNTER;
	} else {
		/* Check whether we have a spare counter */
		idx = find_first_zero_bit(cpuc->used_mask,
				atomic_read(&metag_pmu->active_events));
		if (idx >= METAG_INST_COUNTER) {
			ret = -EAGAIN;
			goto out;
		}

		__set_bit(idx, cpuc->used_mask);
	}
	hwc->idx = idx;

	/* Make sure the counter is disabled */
	metag_pmu->disable(hwc, idx);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		metag_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void metag_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	metag_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	__clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void metag_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	metag_pmu_event_update(event, hwc, hwc->idx);
}

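/*
 * Callbacks handed to the generic perf core. The core brackets calls into
 * add/del and start/stop with pmu_disable()/pmu_enable(), which are no-ops
 * here since each counter is programmed individually.
 */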
static struct pmu pmu = {
	.pmu_enable	= metag_pmu_enable,
	.pmu_disable	= metag_pmu_disable,

	.event_init	= metag_pmu_event_init,

	.add		= metag_pmu_add,
	.del		= metag_pmu_del,
	.start		= metag_pmu_start,
	.stop		= metag_pmu_stop,
	.read		= metag_pmu_read,
};

/* Core counter specific functions */
static const int metag_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0x03,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
	[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
	[PERF_COUNT_HW_CACHE_MISSES] = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
	[PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
};

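/*
 * An entry of -1 above means the generic event has no Meta encoding;
 * metag_pmu_event_map() passes the -1 through and _hw_perf_event_init()
 * rejects the event.
 */
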
static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x08,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x09,
			[C(RESULT_MISS)] = 0x0a,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd0,
			[C(RESULT_MISS)] = 0xd2,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0xd4,
			[C(RESULT_MISS)] = 0xd5,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd1,
			[C(RESULT_MISS)] = 0xd3,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

static void _hw_perf_event_destroy(struct perf_event *event)
{
	atomic_t *active_events = &metag_pmu->active_events;
	struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
		release_pmu_hardware();
		mutex_unlock(pmu_mutex);
	}
}

static int _hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!metag_pmu->cache_events)
		return -EINVAL;

	/* Unpack the type, op and result fields from the config word */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
			op >= PERF_COUNT_HW_CACHE_OP_MAX ||
			result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*metag_pmu->cache_events)[type][op][result];
	if (ev == CACHE_OP_UNSUPPORTED)
		return -EINVAL;

	*evp = ev;
	return 0;
}

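/*
 * Illustrative decode: attr.config = C(L1D) | (C(OP_READ) << 8) |
 * (C(RESULT_ACCESS) << 16) selects the L1 data cache read-access event,
 * i.e. 0x08 in the table above.
 */
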
static int _hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int mapping = 0, err;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= PERF_COUNT_HW_MAX)
			return -EINVAL;

		mapping = metag_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		err = _hw_perf_cache_event(attr->config, &mapping);
		if (err)
			return err;
		break;
	}

	/* Return early if the event is unsupported */
	if (mapping == -1)
		return -EINVAL;

	/*
	 * Early cores have "limited" counters - they have no overflow
	 * interrupts - and so are unable to do sampling without extra work
	 * and timer assistance.
	 */
	if (metag_pmu->max_period == 0) {
		if (hwc->sample_period)
			return -EINVAL;
	}

	/*
	 * Don't assign an index until the event is placed into the hardware.
	 * -1 signifies that we're still deciding where to put it. On SMP
	 * systems each core has its own set of counters, so we can't do any
	 * constraint checking yet.
	 */
	hwc->idx = -1;

	/* Store the event encoding */
	hwc->config |= (unsigned long)mapping;

	/*
	 * For non-sampling runs, limit the sample_period to half of the
	 * counter width. This way, the new counter value should be less
	 * likely to overtake the previous one (unless there are IRQ latency
	 * issues...).
	 */
	if (metag_pmu->max_period) {
		if (!hwc->sample_period) {
			hwc->sample_period = metag_pmu->max_period >> 1;
			hwc->last_period = hwc->sample_period;
			local64_set(&hwc->period_left, hwc->sample_period);
		}
	}

	return 0;
}

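/*
 * Low-level counter access. PERF_COUNT(), PERF_ICORE() and PERF_CHAN()
 * (from "perf_event.h") give the addresses of the hardware counter
 * registers, which are accessed with metag_in32()/metag_out32() below.
 */
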
static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	unsigned int config = event->config;
	unsigned int tmp = config & 0xf0;
	unsigned long flags;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Check if we're enabling the instruction counter (index of
	 * METAG_INST_COUNTER).
	 */
	if (METAG_INST_COUNTER == idx) {
		WARN_ONCE((config != 0x100),
			"invalid configuration (%d) for counter (%d)\n",
			config, idx);

		/* Reset the cycle count */
		__core_reg_set(TXTACTCYC, 0);
		goto unlock;
	}

	/* Check for a core internal or performance channel event. */
	if (tmp) {
		void *perf_addr = (void *)PERF_COUNT(idx);

		/*
		 * Anything other than a cycle count will write the low-
		 * nibble to the correct counter register.
		 */
		switch (tmp) {
		case 0x20:	/* core internal event */
			perf_addr = (void *)PERF_ICORE(idx);
			break;
		case 0x40:	/* performance channel event */
			perf_addr = (void *)PERF_CHAN(idx);
			break;
		}

		metag_out32((config & 0x0f), perf_addr);

		/*
		 * Now we use the high nibble as the performance event to
		 * count.
		 */
		config = tmp >> 4;
	}

	tmp = ((config & 0xf) << 28) |
			((1 << 24) << hard_processor_id());
	if (metag_pmu->max_period)
		/*
		 * Cores supporting overflow interrupts may have had the counter
		 * set to a specific value that needs preserving.
		 */
		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
	else
		/*
		 * Older cores reset the counter on write, so prev_count needs
		 * resetting too so we can calculate a correct delta.
		 */
		local64_set(&event->prev_count, 0);

	metag_out32(tmp, PERF_COUNT(idx));

unlock:
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	unsigned int tmp = 0;
	unsigned long flags;

	/*
	 * The cycle counter can't be disabled per se, as it's a hardware
	 * thread register which is always counting. We merely return if this
	 * is the counter we're attempting to disable.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * The counter value _should_ have been read prior to disabling,
	 * as if we're running on an early core then the value gets reset to
	 * 0, and any read after that would be useless. On the newer cores,
	 * however, it's better to read-modify-update this for purposes of
	 * the overflow interrupt.
	 * Here we remove the thread id AND the event nibble (there are at
	 * least two events that count events that are core global and ignore
	 * the thread id mask). This only works because we don't mix thread
	 * performance counts, and event 0x00 requires a thread id mask!
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	tmp = metag_in32(PERF_COUNT(idx));
	tmp &= 0x00ffffff;
	metag_out32(tmp, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static u64 metag_pmu_read_counter(int idx)
{
	u32 tmp = 0;

	/* The act of reading the cycle counter also clears it */
	if (METAG_INST_COUNTER == idx) {
		__core_reg_swap(TXTACTCYC, tmp);
		goto out;
	}

	tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
out:
	return tmp;
}

static void metag_pmu_write_counter(int idx, u32 val)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	u32 tmp = 0;
	unsigned long flags;

	/*
	 * This _shouldn't_ happen, but if it does, then we can just
	 * ignore the write, as the register is read-only and clear-on-write.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * We'll keep the thread mask and event id, and just update the
	 * counter itself. Also, we should bound the value to 24 bits.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	val &= 0x00ffffff;
	tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
	val |= tmp;
	metag_out32(val, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int metag_pmu_event_map(int idx)
{
	return metag_general_events[idx];
}

static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
	int idx = (int)dev;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event = cpuhw->events[idx];
	struct hw_perf_event *hwc = &event->hw;
	struct pt_regs *regs = get_irq_regs();
	struct perf_sample_data sampledata;
	unsigned long flags;
	u32 counter;

	/*
	 * We need to stop the core temporarily from generating another
	 * interrupt while we disable this counter. However, we don't want
	 * to flag the counter as free.
	 */
	__global_lock2(flags);
	counter = metag_in32(PERF_COUNT(idx));
	metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
	__global_unlock2(flags);

	/* Update the counts and reset the sample period */
	metag_pmu_event_update(event, hwc, idx);
	perf_sample_data_init(&sampledata, 0, hwc->last_period);
	metag_pmu_event_set_period(event, hwc, idx);

	/*
	 * Enable the counter again once core overflow processing has
	 * completed. Note the counter value may have been modified while it
	 * was inactive to set it up ready for the next interrupt.
	 */
	if (!perf_event_overflow(event, &sampledata, regs)) {
		__global_lock2(flags);
		counter = (counter & 0xff000000) |
				(metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
		metag_out32(counter, PERF_COUNT(idx));
		__global_unlock2(flags);
	}

	return IRQ_HANDLED;
}

static struct metag_pmu _metag_pmu = {
	.handle_irq	= metag_pmu_counter_overflow,
	.enable		= metag_pmu_enable_counter,
	.disable	= metag_pmu_disable_counter,
	.read		= metag_pmu_read_counter,
	.write		= metag_pmu_write_counter,
	.event_map	= metag_pmu_event_map,
	.cache_events	= &metag_pmu_cache_events,
	.max_period	= MAX_PERIOD,
	.max_events	= MAX_HWEVENTS,
};

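/*
 * Default Meta 2 configuration. init_hw_perf_events() below clears
 * handle_irq, write and max_period again for early cores that lack
 * overflow interrupts.
 */
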
/* PMU CPU hotplug notifier */
static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	memset(cpuc, 0, sizeof(struct cpu_hw_events));
	raw_spin_lock_init(&cpuc->pmu_lock);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata metag_pmu_notifier = {
	.notifier_call = metag_pmu_cpu_notify,
};

/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
	int ret = 0, cpu;
	u32 version = *(u32 *)METAC_ID;
	int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
	int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
			>> METAC_ID_REV_S;

	/* Not a Meta 2 core, so not supported */
	if (0x02 > major) {
		pr_info("no hardware counter support available\n");
		goto out;
	} else if (0x02 == major) {
		metag_pmu = &_metag_pmu;

		if (min_rev < 0x0104) {
			/*
			 * A core without overflow interrupts, and clear-on-
			 * write counters.
			 */
			metag_pmu->handle_irq = NULL;
			metag_pmu->write = NULL;
			metag_pmu->max_period = 0;
		}

		metag_pmu->name = "Meta 2";
		metag_pmu->version = version;
		metag_pmu->pmu = pmu;
	}

	pr_info("enabled with %s PMU driver, %d counters available\n",
			metag_pmu->name, metag_pmu->max_events);

	/* Initialise the active events and reservation mutex */
	atomic_set(&metag_pmu->active_events, 0);
	mutex_init(&metag_pmu->reserve_mutex);

	/* Clear the counters */
	metag_out32(0, PERF_COUNT(0));
	metag_out32(0, PERF_COUNT(1));

	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

		memset(cpuc, 0, sizeof(struct cpu_hw_events));
		raw_spin_lock_init(&cpuc->pmu_lock);
	}

	register_cpu_notifier(&metag_pmu_notifier);
	ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW);
out:
	return ret;
}
early_initcall(init_hw_perf_events);