/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
static const struct pmu_irqs *pmu_irqs;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_SPINLOCK(pmu_lock);
/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 events counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS 33
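/*
 * Illustrative note (not part of the original sources): counter indices
 * are 1-based on both ARMv6 and ARMv7 (the cycle counter lives at index 1
 * and the event counters follow it), so index 0 of the arrays below is
 * never used and the worst case of 1 + 32 counters gives the 33 above.
 */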
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/* Low-level access methods for a given PMU implementation. */
struct arm_pmu {
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*event_map)(int evt);
	u64		(*raw_event)(u64);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	int		num_events;
	u64		max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX];
static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
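/*
 * A worked example of the decode above (illustrative only): the generic
 * perf ABI packs a cache event as one byte per field, so a request for
 * L1-data read misses arrives as
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * and on ARMv6 the table lookup yields ARMV6_PERFCTR_DCACHE_MISS (0xB).
 */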
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
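/*
 * Example of the arithmetic above (illustrative only): with a
 * sample_period of 1000 on a fresh event, left == 1000, so the 32-bit
 * counter is programmed to (u64)-1000 & 0xffffffff == 0xfffffc18 and the
 * overflow interrupt fires after exactly 1000 increments.
 */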
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
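/*
 * Illustrative note: the shift by (64 - 32) sign-extends a 32-bit wrap so
 * that the delta stays correct across a counter overflow. For instance,
 * prev_raw_count == 0xfffffff0 and new_raw_count == 0x10 gives
 * ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20, i.e. 32 events.
 */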
static void
armpmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu->disable(hwc, idx);

	barrier();

	armpmu_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}
static void
armpmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were throttled we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
static int
armpmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	/* Set the period for the event. */
	armpmu_event_set_period(event, hwc, idx);

	/* Enable the event. */
	armpmu->enable(hwc, idx);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	return err;
}
static struct pmu pmu = {
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}
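/*
 * Illustrative note: the "fake" cpu_hw_events starts with an empty
 * used_mask, so the calls above simply simulate scheduling the whole
 * group onto the counters. On ARMv6, which has only three counters, a
 * group of four hardware events fails here rather than at enable time.
 */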
static int
armpmu_reserve_hardware(void)
{
	int i;
	int err;

	pmu_irqs = reserve_pmu();
	if (IS_ERR(pmu_irqs)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_irqs);
	}

	init_pmu();

	if (pmu_irqs->num_irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM "
				   "perf counters\n", pmu_irqs->irqs[i]);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i)
			free_irq(pmu_irqs->irqs[i], NULL);
		release_pmu(pmu_irqs);
		pmu_irqs = NULL;
	}

	return err;
}
static void
armpmu_release_hardware(void)
{
	int i;

	for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
		free_irq(pmu_irqs->irqs[i], NULL);
	armpmu->stop();

	release_pmu(pmu_irqs);
	pmu_irqs = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu->event_map(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu->raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base = (unsigned long)mapping;
	hwc->config = 0;
	hwc->event_base = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
const struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!armpmu)
		return ERR_PTR(-ENODEV);

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			err = armpmu_reserve_hardware();
		}

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}
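/*
 * Illustrative note (assumed userspace usage, not part of this file): a
 * request such as
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 * issued via perf_event_open() reaches __hw_perf_event_init() above, where
 * event_map() translates the generic config into the CPU-specific event
 * encoding (0xFF for the cycle counter on both ARMv6 and ARMv7).
 */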
void
hw_perf_enable(void)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

void
hw_perf_disable(void)
{
	if (armpmu)
		armpmu->stop();
}
/*
 * ARMv6 Performance counter handling code.
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 *	Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
	ARMV6_PERFCTR_DDEP_STALL	= 0x2,
	ARMV6_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6_PERFCTR_BR_EXEC		= 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	= 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	= 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	= 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	= 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	= 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	= 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	= 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	= 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	= 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	= 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	= 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	= 0xFF,
	ARMV6_PERFCTR_NOP		= 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};
/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS		= 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL		= 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL		= 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC		= 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT	= 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT	= 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC		= 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS	= 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS	= 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS	= 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS	= 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION	= 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE	= 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS	= 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS	= 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL	= 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED	= 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES		= 0xFF,
};
/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
};
static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}
#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
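/*
 * Worked example (illustrative only): programming counter 0 to count
 * instruction cache misses with its interrupt enabled, while keeping the
 * PMU running, corresponds to the PMCR value
 *
 *	ARMV6_PMCR_ENABLE | ARMV6_PMCR_COUNT0_IEN |
 *	(ARMV6_PERFCTR_ICACHE_MISS << ARMV6_PMCR_EVT_COUNT0_SHIFT)
 *
 * which is the read/modify/write that armv6pmu_enable_event() performs
 * below under pmu_lock.
 */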
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}
static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}
static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_event_map(int config)
{
	int mapping = armv6_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static int
armv6mpcore_pmu_event_map(int config)
{
	int mapping = armv6mpcore_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64
armv6pmu_raw_event(u64 config)
{
	return config & 0xff;
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
			return ARMV6_COUNTER1;
		}

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
			return ARMV6_COUNTER0;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
	.name		= "v6",
	.handle_irq	= armv6pmu_handle_irq,
	.enable		= armv6pmu_enable_event,
	.disable	= armv6pmu_disable_event,
	.event_map	= armv6pmu_event_map,
	.raw_event	= armv6pmu_raw_event,
	.read_counter	= armv6pmu_read_counter,
	.write_counter	= armv6pmu_write_counter,
	.get_event_idx	= armv6pmu_get_event_idx,
	.start		= armv6pmu_start,
	.stop		= armv6pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.name		= "v6mpcore",
	.handle_irq	= armv6pmu_handle_irq,
	.enable		= armv6pmu_enable_event,
	.disable	= armv6mpcore_pmu_disable_event,
	.event_map	= armv6mpcore_pmu_event_map,
	.raw_event	= armv6pmu_raw_event,
	.read_counter	= armv6pmu_read_counter,
	.write_counter	= armv6pmu_write_counter,
	.get_event_idx	= armv6pmu_get_event_idx,
	.start		= armv6pmu_start,
	.stop		= armv6pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */

#define ARMV7_PMU_CORTEX_A8_NAME	"ARMv7 Cortex-A8"

#define ARMV7_PMU_CORTEX_A9_NAME	"ARMv7 Cortex-A9"
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refills misses a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		=
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refills misses a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
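/*
 * Illustrative note: ARMV7_EVENT_CNT_TO_CNTx (== 2) converts a perf
 * counter index into a hardware counter number, so the first event
 * counter (idx == ARMV7_COUNTER0 == 2) selects CNT0 and sets bit 0 in
 * CNTENS/CNTENC/INTENS/INTENC/FLAG, while the cycle counter always uses
 * the dedicated bit 31 (ARMV7_CCNT).
 */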
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline int armv7_a8_pmu_event_map(int config)
{
	int mapping = armv7_a8_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static inline int armv7_a9_pmu_event_map(int config)
{
	int mapping = armv7_a9_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64 armv7pmu_raw_event(u64 config)
{
	return config & 0xff;
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the events counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static struct arm_pmu armv7pmu = {
	.handle_irq	= armv7pmu_handle_irq,
	.enable		= armv7pmu_enable_event,
	.disable	= armv7pmu_disable_event,
	.raw_event	= armv7pmu_raw_event,
	.read_counter	= armv7pmu_read_counter,
	.write_counter	= armv7pmu_write_counter,
	.get_event_idx	= armv7pmu_get_event_idx,
	.start		= armv7pmu_start,
	.stop		= armv7pmu_stop,
	.max_period	= (1LLU << 32) - 1,
};
static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
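/*
 * Illustrative note: on a Cortex-A8, which implements four event
 * counters, the PMNC.N field reads back as 4, so the function above
 * returns 5 (CNT0-3 plus the cycle counter) and that value becomes
 * armv7pmu.num_events below.
 */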
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* We only support ARM CPUs implemented by ARM at the moment. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = &armv6pmu;
			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
			       sizeof(armv6_perf_cache_map));
			perf_max_events = armv6pmu.num_events;
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = &armv6mpcore_pmu;
			memcpy(armpmu_perf_cache_map,
			       armv6mpcore_perf_cache_map,
			       sizeof(armv6mpcore_perf_cache_map));
			perf_max_events = armv6mpcore_pmu.num_events;
			break;
		case 0xC080:	/* Cortex-A8 */
			armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
			       sizeof(armv7_a8_perf_cache_map));
			armv7pmu.event_map = armv7_a8_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		case 0xC090:	/* Cortex-A9 */
			armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
			       sizeof(armv7_a9_perf_cache_map));
			armv7pmu.event_map = armv7_a9_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		default:
			pr_info("no hardware support available\n");
			perf_max_events = -1;
		}
	}

	if (armpmu)
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);

	return 0;
}
arch_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */
static inline void
callchain_store(struct perf_callchain_entry *entry,
		u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
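/*
 * Illustrative note: with the APCS frame layout assumed here, a user
 * frame pointer fp points just past the saved {fp, sp, lr} words, so
 *
 *	struct frame_tail *tail = (struct frame_tail *)regs->ARM_fp - 1;
 *
 * addresses the saved registers of the current frame, and each
 * buftail.fp - 1 step below moves one caller further up the stack.
 */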
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
static void
perf_callchain_user(struct pt_regs *regs,
		    struct perf_callchain_entry *entry)
{
	struct frame_tail *tail;

	callchain_store(entry, PERF_CONTEXT_USER);

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	tail = (struct frame_tail *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	callchain_store(entry, fr->pc);
	return 0;
}
static void
perf_callchain_kernel(struct pt_regs *regs,
		      struct perf_callchain_entry *entry)
{
	struct stackframe fr;

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}
static void
perf_do_callchain(struct pt_regs *regs,
		  struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || !current->pid)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);

struct perf_callchain_entry *
perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;
	perf_do_callchain(regs, entry);
	return entry;
}