Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
43eab878 WD |
2 | /* |
3 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | |
4 | * | |
5 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | |
6 | * 2010 (c) MontaVista Software, LLC. | |
7 | * | |
8 | * Copied from ARMv6 code, with the low level code inspired | |
9 | * by the ARMv7 Oprofile code. | |
10 | * | |
11 | * Cortex-A8 has up to 4 configurable performance counters and | |
12 | * a single cycle counter. | |
13 | * Cortex-A9 has up to 31 configurable performance counters and | |
14 | * a single cycle counter. | |
15 | * | |
16 | * All counters can be enabled/disabled and IRQ masked separately. The | |
17 | * cycle counter can be reset independently of the 4 performance counters, | |
17 | * which are reset as a group. | |
18 | */ | |
19 | ||
20 | #ifdef CONFIG_CPU_V7 | |
a505addc | 21 | |
b7aafe99 | 22 | #include <asm/cp15.h> |
29ba0f37 MR |
23 | #include <asm/cputype.h> |
24 | #include <asm/irq_regs.h> | |
b7aafe99 SB |
25 | #include <asm/vfp.h> |
26 | #include "../vfp/vfpinstr.h" | |
27 | ||
29ba0f37 | 28 | #include <linux/of.h> |
fa8ad788 | 29 | #include <linux/perf/arm_pmu.h> |
29ba0f37 MR |
30 | #include <linux/platform_device.h> |
31 | ||
6d4eaf99 WD |
32 | /* |
33 | * Common ARMv7 event types | |
34 | * | |
35 | * Note: An implementation may not be able to count all of these events | |
36 | * but the encodings are considered to be `reserved' when they are | |
37 | * not available. | |
38 | */ | |
f4ab36cb DR |
39 | #define ARMV7_PERFCTR_PMNC_SW_INCR 0x00 |
40 | #define ARMV7_PERFCTR_L1_ICACHE_REFILL 0x01 | |
41 | #define ARMV7_PERFCTR_ITLB_REFILL 0x02 | |
42 | #define ARMV7_PERFCTR_L1_DCACHE_REFILL 0x03 | |
43 | #define ARMV7_PERFCTR_L1_DCACHE_ACCESS 0x04 | |
44 | #define ARMV7_PERFCTR_DTLB_REFILL 0x05 | |
45 | #define ARMV7_PERFCTR_MEM_READ 0x06 | |
46 | #define ARMV7_PERFCTR_MEM_WRITE 0x07 | |
47 | #define ARMV7_PERFCTR_INSTR_EXECUTED 0x08 | |
48 | #define ARMV7_PERFCTR_EXC_TAKEN 0x09 | |
49 | #define ARMV7_PERFCTR_EXC_EXECUTED 0x0A | |
50 | #define ARMV7_PERFCTR_CID_WRITE 0x0B | |
51 | ||
52 | /* | |
53 | * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | |
54 | * It counts: | |
55 | * - all (taken) branch instructions, | |
56 | * - instructions that explicitly write the PC, | |
57 | * - exception generating instructions. | |
58 | */ | |
59 | #define ARMV7_PERFCTR_PC_WRITE 0x0C | |
60 | #define ARMV7_PERFCTR_PC_IMM_BRANCH 0x0D | |
61 | #define ARMV7_PERFCTR_PC_PROC_RETURN 0x0E | |
62 | #define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F | |
63 | #define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED 0x10 | |
64 | #define ARMV7_PERFCTR_CLOCK_CYCLES 0x11 | |
65 | #define ARMV7_PERFCTR_PC_BRANCH_PRED 0x12 | |
66 | ||
67 | /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ | |
68 | #define ARMV7_PERFCTR_MEM_ACCESS 0x13 | |
69 | #define ARMV7_PERFCTR_L1_ICACHE_ACCESS 0x14 | |
70 | #define ARMV7_PERFCTR_L1_DCACHE_WB 0x15 | |
71 | #define ARMV7_PERFCTR_L2_CACHE_ACCESS 0x16 | |
72 | #define ARMV7_PERFCTR_L2_CACHE_REFILL 0x17 | |
73 | #define ARMV7_PERFCTR_L2_CACHE_WB 0x18 | |
74 | #define ARMV7_PERFCTR_BUS_ACCESS 0x19 | |
75 | #define ARMV7_PERFCTR_MEM_ERROR 0x1A | |
76 | #define ARMV7_PERFCTR_INSTR_SPEC 0x1B | |
77 | #define ARMV7_PERFCTR_TTBR_WRITE 0x1C | |
78 | #define ARMV7_PERFCTR_BUS_CYCLES 0x1D | |
79 | ||
80 | #define ARMV7_PERFCTR_CPU_CYCLES 0xFF | |
43eab878 WD |
81 | |
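These common encodings double as the raw event numbers visible to userspace. Below is a minimal, illustrative perf_event_open() sketch (not part of this file; assumes a Linux userspace toolchain) that counts L1 D-cache refills (encoding 0x03 above) for the calling thread:

```c
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper: glibc provides no perf_event_open() symbol. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x03;	/* ARMV7_PERFCTR_L1_DCACHE_REFILL */
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("L1D refills: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
```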
82 | /* ARMv7 Cortex-A8 specific event types */ | |
f4ab36cb DR |
83 | #define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS 0x43 |
84 | #define ARMV7_A8_PERFCTR_L2_CACHE_REFILL 0x44 | |
85 | #define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS 0x50 | |
86 | #define ARMV7_A8_PERFCTR_STALL_ISIDE 0x56 | |
43eab878 WD |
87 | |
88 | /* ARMv7 Cortex-A9 specific event types */ | |
f4ab36cb DR |
89 | #define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME 0x68 |
90 | #define ARMV7_A9_PERFCTR_STALL_ICACHE 0x60 | |
91 | #define ARMV7_A9_PERFCTR_STALL_DISPATCH 0x66 | |
43eab878 | 92 | |
0c205cbe | 93 | /* ARMv7 Cortex-A5 specific event types */ |
f4ab36cb DR |
94 | #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL 0xc2 |
95 | #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP 0xc3 | |
0c205cbe | 96 | |
14abd038 | 97 | /* ARMv7 Cortex-A15 specific event types */ |
f4ab36cb DR |
98 | #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 |
99 | #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 | |
100 | #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ 0x42 | |
101 | #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE 0x43 | |
14abd038 | 102 | |
f4ab36cb DR |
103 | #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ 0x4C |
104 | #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE 0x4D | |
14abd038 | 105 | |
f4ab36cb DR |
106 | #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ 0x50 |
107 | #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 | |
108 | #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ 0x52 | |
109 | #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE 0x53 | |
14abd038 | 110 | |
f4ab36cb | 111 | #define ARMV7_A15_PERFCTR_PC_WRITE_SPEC 0x76 |
14abd038 | 112 | |
8e781f65 | 113 | /* ARMv7 Cortex-A12 specific event types */ |
f4ab36cb DR |
114 | #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 |
115 | #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 | |
8e781f65 | 116 | |
f4ab36cb DR |
117 | #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ 0x50 |
118 | #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 | |
8e781f65 | 119 | |
f4ab36cb | 120 | #define ARMV7_A12_PERFCTR_PC_WRITE_SPEC 0x76 |
8e781f65 | 121 | |
f4ab36cb | 122 | #define ARMV7_A12_PERFCTR_PF_TLB_REFILL 0xe7 |
8e781f65 | 123 | |
b7aafe99 | 124 | /* ARMv7 Krait specific event types */ |
f4ab36cb DR |
125 | #define KRAIT_PMRESR0_GROUP0 0xcc |
126 | #define KRAIT_PMRESR1_GROUP0 0xd0 | |
127 | #define KRAIT_PMRESR2_GROUP0 0xd4 | |
128 | #define KRAIT_VPMRESR0_GROUP0 0xd8 | |
b7aafe99 | 129 | |
f4ab36cb DR |
130 | #define KRAIT_PERFCTR_L1_ICACHE_ACCESS 0x10011 |
131 | #define KRAIT_PERFCTR_L1_ICACHE_MISS 0x10010 | |
b7aafe99 | 132 | |
f4ab36cb DR |
133 | #define KRAIT_PERFCTR_L1_ITLB_ACCESS 0x12222 |
134 | #define KRAIT_PERFCTR_L1_DTLB_ACCESS 0x12210 | |
b7aafe99 | 135 | |
341e42c4 | 136 | /* ARMv7 Scorpion specific event types */ |
f4ab36cb DR |
137 | #define SCORPION_LPM0_GROUP0 0x4c |
138 | #define SCORPION_LPM1_GROUP0 0x50 | |
139 | #define SCORPION_LPM2_GROUP0 0x54 | |
140 | #define SCORPION_L2LPM_GROUP0 0x58 | |
141 | #define SCORPION_VLPM_GROUP0 0x5c | |
341e42c4 | 142 | |
f4ab36cb DR |
143 | #define SCORPION_ICACHE_ACCESS 0x10053 |
144 | #define SCORPION_ICACHE_MISS 0x10052 | |
341e42c4 | 145 | |
f4ab36cb DR |
146 | #define SCORPION_DTLB_ACCESS 0x12013 |
147 | #define SCORPION_DTLB_MISS 0x12012 | |
341e42c4 | 148 | |
f4ab36cb | 149 | #define SCORPION_ITLB_MISS 0x12021 |
341e42c4 | 150 | |
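Note that, unlike the architected encodings earlier in the file, the Krait and Scorpion values above (e.g. 0x10011, 0x12222) are wider than the 8-bit PMXEVTYPER event field. They are synthetic codes that the Qualcomm-specific code decomposes into group selections programmed through the PMRESRn/LPMn registers (see the PMRESR layout comment near the end of this file), which is why krait_map_event() and scorpion_map_event() below pass a 0xFFFFF mask to armpmu_map_event() instead of the 0xFF used for the Cortex cores.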
43eab878 WD |
151 | /* |
152 | * Cortex-A8 HW events mapping | |
153 | * | |
154 | * The hardware events that we support. We do support cache operations but | |
155 | * we have Harvard caches and no way to combine instruction and data | |
156 | * accesses/misses in hardware. | |
157 | */ | |
158 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 159 | PERF_MAP_ALL_UNSUPPORTED, |
0445e7a5 WD |
160 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
161 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
162 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
163 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
164 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
165 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
0445e7a5 | 166 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, |
43eab878 WD |
167 | }; |
168 | ||
169 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
170 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
171 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
172 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
173 | ||
174 | /* | |
175 | * The performance counters don't differentiate between read and write | |
176 | * accesses/misses so this isn't strictly correct, but it's the best we | |
177 | * can do. Writes and reads get combined. | |
178 | */ | |
179 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
180 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
181 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
182 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
183 | ||
184 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, | |
185 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
186 | ||
187 | [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, | |
188 | [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, | |
189 | [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, | |
190 | [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, | |
191 | ||
192 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
193 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
194 | ||
195 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
196 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
197 | ||
198 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
199 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
200 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
201 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
202 | }; |
203 | ||
204 | /* | |
205 | * Cortex-A9 HW events mapping | |
206 | */ | |
207 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 208 | PERF_MAP_ALL_UNSUPPORTED, |
0445e7a5 WD |
209 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
210 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, | |
211 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
212 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
213 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
214 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
0445e7a5 WD |
215 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, |
216 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, | |
43eab878 WD |
217 | }; |
218 | ||
219 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
220 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
221 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
222 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
223 | ||
224 | /* | |
225 | * The performance counters don't differentiate between read and write | |
226 | * accesses/misses so this isn't strictly correct, but it's the best we | |
227 | * can do. Writes and reads get combined. | |
228 | */ | |
229 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
230 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
231 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
232 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
233 | ||
234 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
235 | ||
236 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
237 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
238 | ||
239 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
240 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
241 | ||
242 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
243 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
244 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
245 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
246 | }; |
247 | ||
0c205cbe WD |
248 | /* |
249 | * Cortex-A5 HW events mapping | |
250 | */ | |
251 | static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 252 | PERF_MAP_ALL_UNSUPPORTED, |
0445e7a5 WD |
253 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
254 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
255 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
256 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
257 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
258 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
0c205cbe WD |
259 | }; |
260 | ||
261 | static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
262 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
263 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
264 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
265 | ||
266 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
267 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
268 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
269 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
270 | [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, | |
271 | [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, | |
272 | ||
273 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
274 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
275 | /* | |
276 | * The prefetch counters don't differentiate between the I side and the | |
277 | * D side. | |
278 | */ | |
279 | [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, | |
280 | [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, | |
281 | ||
282 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
283 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
284 | ||
285 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
286 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
287 | ||
288 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
289 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
290 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
291 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
0c205cbe WD |
292 | }; |
293 | ||
14abd038 WD |
294 | /* |
295 | * Cortex-A15 HW events mapping | |
296 | */ | |
297 | static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 298 | PERF_MAP_ALL_UNSUPPORTED, |
0445e7a5 WD |
299 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
300 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
301 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
302 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
303 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC, | |
304 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
305 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | |
14abd038 WD |
306 | }; |
307 | ||
308 | static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
309 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
310 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
311 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
312 | ||
313 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ, | |
314 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ, | |
315 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE, | |
316 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE, | |
317 | ||
318 | /* | |
319 | * Not all performance counters differentiate between read and write | |
320 | * accesses/misses so we're not always strictly correct, but it's the | |
321 | * best we can do. Writes and reads get combined in these cases. | |
322 | */ | |
323 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
324 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
325 | ||
326 | [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ, | |
327 | [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ, | |
328 | [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE, | |
329 | [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE, | |
330 | ||
331 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ, | |
332 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE, | |
333 | ||
334 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
335 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
336 | ||
337 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
338 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
339 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
340 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
14abd038 WD |
341 | }; |
342 | ||
d33c88c6 WD |
343 | /* |
344 | * Cortex-A7 HW events mapping | |
345 | */ | |
346 | static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 347 | PERF_MAP_ALL_UNSUPPORTED, |
d33c88c6 WD |
348 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
349 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
350 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
351 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
352 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
353 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
354 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | |
d33c88c6 WD |
355 | }; |
356 | ||
357 | static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
358 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
359 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
360 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
361 | ||
362 | /* | |
363 | * The performance counters don't differentiate between read and write | |
364 | * accesses/misses so this isn't strictly correct, but it's the best we | |
365 | * can do. Writes and reads get combined. | |
366 | */ | |
367 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
368 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
369 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
370 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
371 | ||
372 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
373 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
374 | ||
375 | [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | |
376 | [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
377 | [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | |
378 | [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
379 | ||
380 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
381 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
382 | ||
383 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
384 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
385 | ||
386 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
387 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
388 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
389 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
d33c88c6 WD |
390 | }; |
391 | ||
8e781f65 AT |
392 | /* |
393 | * Cortex-A12 HW events mapping | |
394 | */ | |
395 | static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 396 | PERF_MAP_ALL_UNSUPPORTED, |
8e781f65 AT |
397 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
398 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
399 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
400 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
401 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC, | |
402 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
403 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | |
8e781f65 AT |
404 | }; |
405 | ||
406 | static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
407 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
408 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
409 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
410 | ||
411 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ, | |
412 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
413 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE, | |
414 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
415 | ||
416 | /* | |
417 | * Not all performance counters differentiate between read and write | |
418 | * accesses/misses so we're not always strictly correct, but it's the | |
419 | * best we can do. Writes and reads get combined in these cases. | |
420 | */ | |
421 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
422 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
423 | ||
424 | [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ, | |
425 | [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
426 | [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE, | |
427 | [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
428 | ||
429 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
430 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
431 | [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL, | |
432 | ||
433 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
434 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
435 | ||
436 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
437 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
438 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
439 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
8e781f65 AT |
440 | }; |
441 | ||
2a3391cd SB |
442 | /* |
443 | * Krait HW events mapping | |
444 | */ | |
445 | static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 446 | PERF_MAP_ALL_UNSUPPORTED, |
2a3391cd SB |
447 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
448 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
2a3391cd SB |
449 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, |
450 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
451 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | |
452 | }; | |
453 | ||
454 | static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = { | |
6b7658ec | 455 | PERF_MAP_ALL_UNSUPPORTED, |
2a3391cd SB |
456 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
457 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
2a3391cd SB |
458 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, |
459 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | |
460 | }; | |
461 | ||
462 | static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
463 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
464 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
6b7658ec MR |
465 | PERF_CACHE_MAP_ALL_UNSUPPORTED, |
466 | ||
467 | /* | |
468 | * The performance counters don't differentiate between read and write | |
469 | * accesses/misses so this isn't strictly correct, but it's the best we | |
470 | * can do. Writes and reads get combined. | |
471 | */ | |
472 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
473 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
474 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
475 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
476 | ||
477 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS, | |
478 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS, | |
479 | ||
480 | [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS, | |
481 | [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS, | |
482 | ||
483 | [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS, | |
484 | [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS, | |
485 | ||
486 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
487 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
488 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
489 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
2a3391cd SB |
490 | }; |
491 | ||
341e42c4 SB |
492 | /* |
493 | * Scorpion HW events mapping | |
494 | */ | |
495 | static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = { | |
496 | PERF_MAP_ALL_UNSUPPORTED, | |
497 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | |
498 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
499 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
500 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
501 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | |
502 | }; | |
503 | ||
504 | static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
505 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
506 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
507 | PERF_CACHE_MAP_ALL_UNSUPPORTED, | |
508 | /* | |
509 | * The performance counters don't differentiate between read and write | |
510 | * accesses/misses so this isn't strictly correct, but it's the best we | |
511 | * can do. Writes and reads get combined. | |
512 | */ | |
513 | [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
514 | [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
515 | [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
516 | [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
517 | [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS, | |
518 | [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS, | |
519 | /* | |
520 | * Only ITLB misses and DTLB refills are supported. If users want the | |
521 | * DTLB refill misses, a raw counter must be used. |
522 | */ | |
523 | [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, | |
524 | [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, | |
525 | [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, | |
526 | [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, | |
527 | [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, | |
528 | [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, | |
529 | [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
530 | [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
531 | [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
532 | [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
533 | }; | |
534 | ||
abff083c WD |
535 | PMU_FORMAT_ATTR(event, "config:0-7"); |
536 | ||
537 | static struct attribute *armv7_pmu_format_attrs[] = { | |
538 | &format_attr_event.attr, | |
539 | NULL, | |
540 | }; | |
541 | ||
542 | static struct attribute_group armv7_pmu_format_attr_group = { | |
543 | .name = "format", | |
544 | .attrs = armv7_pmu_format_attrs, | |
545 | }; | |
546 | ||
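The format attribute above advertises that the event field occupies config bits 0-7, matching the 8-bit PMXEVTYPER event number. With a perf tool new enough to parse PMU sysfs formats, this lets an event be requested symbolically, e.g. `perf stat -e armv7_cortex_a9/event=0x68/` (using a PMU name registered later in this file), instead of as a raw hex config value.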
3fbac6cc DR |
547 | #define ARMV7_EVENT_ATTR_RESOLVE(m) #m |
548 | #define ARMV7_EVENT_ATTR(name, config) \ | |
549 | PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \ | |
550 | "event=" ARMV7_EVENT_ATTR_RESOLVE(config)) | |
551 | ||
552 | ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR); | |
553 | ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL); | |
554 | ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL); | |
555 | ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL); | |
556 | ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS); | |
557 | ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL); | |
558 | ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ); | |
559 | ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE); | |
560 | ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED); | |
561 | ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN); | |
562 | ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED); | |
563 | ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE); | |
564 | ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE); | |
565 | ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH); | |
566 | ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN); | |
567 | ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS); | |
568 | ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED); | |
569 | ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES); | |
570 | ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED); | |
571 | ||
572 | static struct attribute *armv7_pmuv1_event_attrs[] = { | |
573 | &armv7_event_attr_sw_incr.attr.attr, | |
574 | &armv7_event_attr_l1i_cache_refill.attr.attr, | |
575 | &armv7_event_attr_l1i_tlb_refill.attr.attr, | |
576 | &armv7_event_attr_l1d_cache_refill.attr.attr, | |
577 | &armv7_event_attr_l1d_cache.attr.attr, | |
578 | &armv7_event_attr_l1d_tlb_refill.attr.attr, | |
579 | &armv7_event_attr_ld_retired.attr.attr, | |
580 | &armv7_event_attr_st_retired.attr.attr, | |
581 | &armv7_event_attr_inst_retired.attr.attr, | |
582 | &armv7_event_attr_exc_taken.attr.attr, | |
583 | &armv7_event_attr_exc_return.attr.attr, | |
584 | &armv7_event_attr_cid_write_retired.attr.attr, | |
585 | &armv7_event_attr_pc_write_retired.attr.attr, | |
586 | &armv7_event_attr_br_immed_retired.attr.attr, | |
587 | &armv7_event_attr_br_return_retired.attr.attr, | |
588 | &armv7_event_attr_unaligned_ldst_retired.attr.attr, | |
589 | &armv7_event_attr_br_mis_pred.attr.attr, | |
590 | &armv7_event_attr_cpu_cycles.attr.attr, | |
591 | &armv7_event_attr_br_pred.attr.attr, | |
abff083c | 592 | NULL, |
3fbac6cc DR |
593 | }; |
594 | ||
595 | static struct attribute_group armv7_pmuv1_events_attr_group = { | |
596 | .name = "events", | |
597 | .attrs = armv7_pmuv1_event_attrs, | |
598 | }; | |
599 | ||
3fbac6cc DR |
600 | ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS); |
601 | ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS); | |
602 | ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB); | |
603 | ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS); | |
604 | ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL); | |
605 | ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB); | |
606 | ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS); | |
607 | ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR); | |
608 | ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC); | |
609 | ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE); | |
610 | ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES); | |
611 | ||
612 | static struct attribute *armv7_pmuv2_event_attrs[] = { | |
613 | &armv7_event_attr_sw_incr.attr.attr, | |
614 | &armv7_event_attr_l1i_cache_refill.attr.attr, | |
615 | &armv7_event_attr_l1i_tlb_refill.attr.attr, | |
616 | &armv7_event_attr_l1d_cache_refill.attr.attr, | |
617 | &armv7_event_attr_l1d_cache.attr.attr, | |
618 | &armv7_event_attr_l1d_tlb_refill.attr.attr, | |
619 | &armv7_event_attr_ld_retired.attr.attr, | |
620 | &armv7_event_attr_st_retired.attr.attr, | |
621 | &armv7_event_attr_inst_retired.attr.attr, | |
622 | &armv7_event_attr_exc_taken.attr.attr, | |
623 | &armv7_event_attr_exc_return.attr.attr, | |
624 | &armv7_event_attr_cid_write_retired.attr.attr, | |
625 | &armv7_event_attr_pc_write_retired.attr.attr, | |
626 | &armv7_event_attr_br_immed_retired.attr.attr, | |
627 | &armv7_event_attr_br_return_retired.attr.attr, | |
628 | &armv7_event_attr_unaligned_ldst_retired.attr.attr, | |
629 | &armv7_event_attr_br_mis_pred.attr.attr, | |
630 | &armv7_event_attr_cpu_cycles.attr.attr, | |
631 | &armv7_event_attr_br_pred.attr.attr, | |
632 | &armv7_event_attr_mem_access.attr.attr, | |
633 | &armv7_event_attr_l1i_cache.attr.attr, | |
634 | &armv7_event_attr_l1d_cache_wb.attr.attr, | |
635 | &armv7_event_attr_l2d_cache.attr.attr, | |
636 | &armv7_event_attr_l2d_cache_refill.attr.attr, | |
637 | &armv7_event_attr_l2d_cache_wb.attr.attr, | |
638 | &armv7_event_attr_bus_access.attr.attr, | |
639 | &armv7_event_attr_memory_error.attr.attr, | |
640 | &armv7_event_attr_inst_spec.attr.attr, | |
641 | &armv7_event_attr_ttbr_write_retired.attr.attr, | |
642 | &armv7_event_attr_bus_cycles.attr.attr, | |
abff083c | 643 | NULL, |
3fbac6cc DR |
644 | }; |
645 | ||
646 | static struct attribute_group armv7_pmuv2_events_attr_group = { | |
647 | .name = "events", | |
648 | .attrs = armv7_pmuv2_event_attrs, | |
649 | }; | |
650 | ||
43eab878 | 651 | /* |
c691bb62 | 652 | * Perf Events' indices |
43eab878 | 653 | */ |
c691bb62 WD |
654 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
655 | #define ARMV7_IDX_COUNTER0 1 | |
7279adbd SH |
656 | #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ |
657 | (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | |
c691bb62 WD |
658 | |
659 | #define ARMV7_MAX_COUNTERS 32 | |
660 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) | |
43eab878 WD |
661 | |
662 | /* | |
c691bb62 | 663 | * ARMv7 low level PMNC access |
43eab878 | 664 | */ |
43eab878 WD |
665 | |
666 | /* | |
c691bb62 | 667 | * Perf Event to low level counters mapping |
43eab878 | 668 | */ |
c691bb62 WD |
669 | #define ARMV7_IDX_TO_COUNTER(x) \ |
670 | (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) | |
43eab878 WD |
671 | |
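As a quick illustration of this mapping (a standalone sketch, with the two macros copied from above; not part of the kernel file):

```c
#include <stdio.h>

#define ARMV7_IDX_COUNTER0	1
#define ARMV7_COUNTER_MASK	(32 - 1)
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

int main(void)
{
	/* perf idx 0 is the cycle counter and never goes through PMSELR */
	printf("%d\n", ARMV7_IDX_TO_COUNTER(1));	/* hw counter 0 */
	printf("%d\n", ARMV7_IDX_TO_COUNTER(4));	/* hw counter 3 */
	return 0;
}
```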
672 | /* | |
673 | * Per-CPU PMNC: config reg | |
674 | */ | |
675 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | |
676 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | |
677 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | |
678 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | |
679 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | |
680 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ |
681 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | |
682 | #define ARMV7_PMNC_N_MASK 0x1f | |
683 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | |
684 | ||
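For illustration, here is the N-field extraction these masks enable, the same computation armv7_read_num_pmnc_events() performs near the end of this file (the PMNC read-back value is hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

#define ARMV7_PMNC_N_SHIFT	11
#define ARMV7_PMNC_N_MASK	0x1f

int main(void)
{
	uint32_t pmnc = 0x41002001;	/* hypothetical: IMP=0x41, N=4, E=1 */
	int nb_cnt = (pmnc >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* 4 event counters; the driver adds 1 for the cycle counter */
	printf("num_events = %d\n", nb_cnt + 1);	/* prints 5 */
	return 0;
}
```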
685 | /* | |
43eab878 | 686 | * FLAG: counters overflow flag status reg |
43eab878 | 687 | */ |
43eab878 WD |
688 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ |
689 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | |
43eab878 WD |
690 | |
691 | /* | |
a505addc | 692 | * PMXEVTYPER: Event selection reg |
43eab878 | 693 | */ |
f2fe09b0 | 694 | #define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ |
a505addc | 695 | #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ |
43eab878 WD |
696 | |
697 | /* | |
a505addc | 698 | * Event filters for PMUv2 |
43eab878 | 699 | */ |
a505addc WD |
700 | #define ARMV7_EXCLUDE_PL1 (1 << 31) |
701 | #define ARMV7_EXCLUDE_USER (1 << 30) | |
702 | #define ARMV7_INCLUDE_HYP (1 << 27) | |
43eab878 | 703 | |
8d1a0ae7 MF |
704 | /* |
705 | * Secure debug enable reg | |
706 | */ | |
707 | #define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */ | |
708 | ||
6330aae7 | 709 | static inline u32 armv7_pmnc_read(void) |
43eab878 WD |
710 | { |
711 | u32 val; | |
712 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | |
713 | return val; | |
714 | } | |
715 | ||
6330aae7 | 716 | static inline void armv7_pmnc_write(u32 val) |
43eab878 WD |
717 | { |
718 | val &= ARMV7_PMNC_MASK; | |
d25d3b4c | 719 | isb(); |
43eab878 WD |
720 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); |
721 | } | |
722 | ||
6330aae7 | 723 | static inline int armv7_pmnc_has_overflowed(u32 pmnc) |
43eab878 WD |
724 | { |
725 | return pmnc & ARMV7_OVERFLOWED_MASK; | |
726 | } | |
727 | ||
7279adbd | 728 | static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
c691bb62 | 729 | { |
7279adbd SH |
730 | return idx >= ARMV7_IDX_CYCLE_COUNTER && |
731 | idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); | |
c691bb62 WD |
732 | } |
733 | ||
734 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) | |
43eab878 | 735 | { |
7279adbd | 736 | return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); |
43eab878 WD |
737 | } |
738 | ||
cb6eb108 | 739 | static inline void armv7_pmnc_select_counter(int idx) |
43eab878 | 740 | { |
7279adbd | 741 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 742 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); |
d25d3b4c | 743 | isb(); |
43eab878 WD |
744 | } |
745 | ||
ed6f2a52 | 746 | static inline u32 armv7pmu_read_counter(struct perf_event *event) |
43eab878 | 747 | { |
7279adbd | 748 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
ed6f2a52 SH |
749 | struct hw_perf_event *hwc = &event->hw; |
750 | int idx = hwc->idx; | |
6330aae7 | 751 | u32 value = 0; |
43eab878 | 752 | |
cb6eb108 | 753 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { |
43eab878 WD |
754 | pr_err("CPU%u reading wrong counter %d\n", |
755 | smp_processor_id(), idx); | |
cb6eb108 | 756 | } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { |
c691bb62 | 757 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); |
cb6eb108 | 758 | } else { |
759 | armv7_pmnc_select_counter(idx); | |
c691bb62 | 760 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value)); |
cb6eb108 | 761 | } |
43eab878 WD |
762 | |
763 | return value; | |
764 | } | |
765 | ||
ed6f2a52 | 766 | static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) |
43eab878 | 767 | { |
7279adbd | 768 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
ed6f2a52 SH |
769 | struct hw_perf_event *hwc = &event->hw; |
770 | int idx = hwc->idx; | |
771 | ||
cb6eb108 | 772 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { |
43eab878 WD |
773 | pr_err("CPU%u writing wrong counter %d\n", |
774 | smp_processor_id(), idx); | |
cb6eb108 | 775 | } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { |
c691bb62 | 776 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); |
cb6eb108 | 777 | } else { |
778 | armv7_pmnc_select_counter(idx); | |
c691bb62 | 779 | asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); |
cb6eb108 | 780 | } |
43eab878 WD |
781 | } |
782 | ||
25e29c7c | 783 | static inline void armv7_pmnc_write_evtsel(int idx, u32 val) |
43eab878 | 784 | { |
cb6eb108 | 785 | armv7_pmnc_select_counter(idx); |
786 | val &= ARMV7_EVTYPE_MASK; | |
787 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | |
43eab878 WD |
788 | } |
789 | ||
cb6eb108 | 790 | static inline void armv7_pmnc_enable_counter(int idx) |
43eab878 | 791 | { |
7279adbd | 792 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 793 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); |
43eab878 WD |
794 | } |
795 | ||
cb6eb108 | 796 | static inline void armv7_pmnc_disable_counter(int idx) |
43eab878 | 797 | { |
7279adbd | 798 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 799 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); |
43eab878 WD |
800 | } |
801 | ||
cb6eb108 | 802 | static inline void armv7_pmnc_enable_intens(int idx) |
43eab878 | 803 | { |
7279adbd | 804 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 805 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); |
43eab878 WD |
806 | } |
807 | ||
cb6eb108 | 808 | static inline void armv7_pmnc_disable_intens(int idx) |
43eab878 | 809 | { |
7279adbd | 810 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 811 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); |
99c1745b WD |
812 | isb(); |
813 | /* Clear the overflow flag in case an interrupt is pending. */ | |
814 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); | |
815 | isb(); | |
43eab878 WD |
816 | } |
817 | ||
818 | static inline u32 armv7_pmnc_getreset_flags(void) | |
819 | { | |
820 | u32 val; | |
821 | ||
822 | /* Read */ | |
823 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | |
824 | ||
825 | /* Write to clear flags */ | |
826 | val &= ARMV7_FLAG_MASK; | |
827 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | |
828 | ||
829 | return val; | |
830 | } | |
831 | ||
832 | #ifdef DEBUG | |
7279adbd | 833 | static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) |
43eab878 WD |
834 | { |
835 | u32 val; | |
836 | unsigned int cnt; | |
837 | ||
52a5566e | 838 | pr_info("PMNC registers dump:\n"); |
43eab878 WD |
839 | |
840 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | |
52a5566e | 841 | pr_info("PMNC =0x%08x\n", val); |
43eab878 WD |
842 | |
843 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | |
52a5566e | 844 | pr_info("CNTENS=0x%08x\n", val); |
43eab878 WD |
845 | |
846 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | |
52a5566e | 847 | pr_info("INTENS=0x%08x\n", val); |
43eab878 WD |
848 | |
849 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | |
52a5566e | 850 | pr_info("FLAGS =0x%08x\n", val); |
43eab878 WD |
851 | |
852 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | |
52a5566e | 853 | pr_info("SELECT=0x%08x\n", val); |
43eab878 WD |
854 | |
855 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | |
52a5566e | 856 | pr_info("CCNT =0x%08x\n", val); |
43eab878 | 857 | |
7279adbd SH |
858 | for (cnt = ARMV7_IDX_COUNTER0; |
859 | cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { | |
43eab878 WD |
860 | armv7_pmnc_select_counter(cnt); |
861 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | |
52a5566e | 862 | pr_info("CNT[%d] count =0x%08x\n", |
c691bb62 | 863 | ARMV7_IDX_TO_COUNTER(cnt), val); |
43eab878 | 864 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); |
52a5566e | 865 | pr_info("CNT[%d] evtsel=0x%08x\n", |
c691bb62 | 866 | ARMV7_IDX_TO_COUNTER(cnt), val); |
43eab878 WD |
867 | } |
868 | } | |
869 | #endif | |
870 | ||
ed6f2a52 | 871 | static void armv7pmu_enable_event(struct perf_event *event) |
43eab878 WD |
872 | { |
873 | unsigned long flags; | |
ed6f2a52 SH |
874 | struct hw_perf_event *hwc = &event->hw; |
875 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | |
11679250 | 876 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); |
ed6f2a52 | 877 | int idx = hwc->idx; |
43eab878 | 878 | |
7279adbd SH |
879 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { |
880 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | |
881 | smp_processor_id(), idx); | |
882 | return; | |
883 | } | |
884 | ||
43eab878 WD |
885 | /* |
886 | * Enable counter and interrupt, and set the counter to count | |
887 | * the event that we're interested in. | |
888 | */ | |
0f78d2d5 | 889 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
890 | |
891 | /* | |
892 | * Disable counter | |
893 | */ | |
894 | armv7_pmnc_disable_counter(idx); | |
895 | ||
896 | /* | |
897 | * Set event (if destined for PMNx counters) | |
a505addc WD |
898 | * We only need to set the event for the cycle counter if we |
899 | * have the ability to perform event filtering. | |
43eab878 | 900 | */ |
513c99ce | 901 | if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) |
43eab878 WD |
902 | armv7_pmnc_write_evtsel(idx, hwc->config_base); |
903 | ||
904 | /* | |
905 | * Enable interrupt for this counter | |
906 | */ | |
907 | armv7_pmnc_enable_intens(idx); | |
908 | ||
909 | /* | |
910 | * Enable counter | |
911 | */ | |
912 | armv7_pmnc_enable_counter(idx); | |
913 | ||
0f78d2d5 | 914 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
915 | } |
916 | ||
ed6f2a52 | 917 | static void armv7pmu_disable_event(struct perf_event *event) |
43eab878 WD |
918 | { |
919 | unsigned long flags; | |
ed6f2a52 SH |
920 | struct hw_perf_event *hwc = &event->hw; |
921 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | |
11679250 | 922 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); |
ed6f2a52 | 923 | int idx = hwc->idx; |
43eab878 | 924 | |
7279adbd SH |
925 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { |
926 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | |
927 | smp_processor_id(), idx); | |
928 | return; | |
929 | } | |
930 | ||
43eab878 WD |
931 | /* |
932 | * Disable counter and interrupt | |
933 | */ | |
0f78d2d5 | 934 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
935 | |
936 | /* | |
937 | * Disable counter | |
938 | */ | |
939 | armv7_pmnc_disable_counter(idx); | |
940 | ||
941 | /* | |
942 | * Disable interrupt for this counter | |
943 | */ | |
944 | armv7_pmnc_disable_intens(idx); | |
945 | ||
0f78d2d5 | 946 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
947 | } |
948 | ||
949 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |
950 | { | |
6330aae7 | 951 | u32 pmnc; |
43eab878 | 952 | struct perf_sample_data data; |
ed6f2a52 | 953 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
11679250 | 954 | struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); |
43eab878 WD |
955 | struct pt_regs *regs; |
956 | int idx; | |
957 | ||
958 | /* | |
959 | * Get and reset the IRQ flags | |
960 | */ | |
961 | pmnc = armv7_pmnc_getreset_flags(); | |
962 | ||
963 | /* | |
964 | * Did an overflow occur? | |
965 | */ | |
966 | if (!armv7_pmnc_has_overflowed(pmnc)) | |
967 | return IRQ_NONE; | |
968 | ||
969 | /* | |
970 | * Handle the counter(s) overflow(s) | |
971 | */ | |
972 | regs = get_irq_regs(); | |
973 | ||
8be3f9a2 | 974 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
43eab878 WD |
975 | struct perf_event *event = cpuc->events[idx]; |
976 | struct hw_perf_event *hwc; | |
977 | ||
f6f5a30c WD |
978 | /* Ignore if we don't have an event. */ |
979 | if (!event) | |
980 | continue; | |
981 | ||
43eab878 WD |
982 | /* |
983 | * We have a single interrupt for all counters. Check that | |
984 | * each counter has overflowed before we process it. | |
985 | */ | |
986 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | |
987 | continue; | |
988 | ||
989 | hwc = &event->hw; | |
ed6f2a52 | 990 | armpmu_event_update(event); |
fd0d000b | 991 | perf_sample_data_init(&data, 0, hwc->last_period); |
ed6f2a52 | 992 | if (!armpmu_event_set_period(event)) |
43eab878 WD |
993 | continue; |
994 | ||
a8b0ca17 | 995 | if (perf_event_overflow(event, &data, regs)) |
ed6f2a52 | 996 | cpu_pmu->disable(event); |
43eab878 WD |
997 | } |
998 | ||
999 | /* | |
1000 | * Handle the pending perf events. | |
1001 | * | |
1002 | * Note: this call *must* be run with interrupts disabled. For | |
1003 | * platforms that can have the PMU interrupts raised as an NMI, this | |
1004 | * will not work. | |
1005 | */ | |
1006 | irq_work_run(); | |
1007 | ||
1008 | return IRQ_HANDLED; | |
1009 | } | |
1010 | ||
ed6f2a52 | 1011 | static void armv7pmu_start(struct arm_pmu *cpu_pmu) |
43eab878 WD |
1012 | { |
1013 | unsigned long flags; | |
11679250 | 1014 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); |
43eab878 | 1015 | |
0f78d2d5 | 1016 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
1017 | /* Enable all counters */ |
1018 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | |
0f78d2d5 | 1019 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
1020 | } |
1021 | ||
ed6f2a52 | 1022 | static void armv7pmu_stop(struct arm_pmu *cpu_pmu) |
43eab878 WD |
1023 | { |
1024 | unsigned long flags; | |
11679250 | 1025 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); |
43eab878 | 1026 | |
0f78d2d5 | 1027 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
1028 | /* Disable all counters */ |
1029 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | |
0f78d2d5 | 1030 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
1031 | } |
1032 | ||
8be3f9a2 | 1033 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, |
ed6f2a52 | 1034 | struct perf_event *event) |
43eab878 WD |
1035 | { |
1036 | int idx; | |
ed6f2a52 SH |
1037 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
1038 | struct hw_perf_event *hwc = &event->hw; | |
1039 | unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; | |
43eab878 WD |
1040 | |
1041 | /* Always place a cycle counter into the cycle counter. */ | |
a505addc | 1042 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { |
c691bb62 | 1043 | if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask)) |
43eab878 WD |
1044 | return -EAGAIN; |
1045 | ||
c691bb62 WD |
1046 | return ARMV7_IDX_CYCLE_COUNTER; |
1047 | } | |
43eab878 | 1048 | |
c691bb62 WD |
1049 | /* |
1050 | * For anything other than a cycle counter, try to use |
1051 | * one of the event counters. |
1052 | */ | |
8be3f9a2 | 1053 | for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { |
c691bb62 WD |
1054 | if (!test_and_set_bit(idx, cpuc->used_mask)) |
1055 | return idx; | |
43eab878 | 1056 | } |
c691bb62 WD |
1057 | |
1058 | /* The counters are all in use. */ | |
1059 | return -EAGAIN; | |
43eab878 WD |
1060 | } |
1061 | ||
a505addc WD |
1062 | /* |
1063 | * Add an event filter to a given event. This will only work for PMUv2 PMUs. | |
1064 | */ | |
1065 | static int armv7pmu_set_event_filter(struct hw_perf_event *event, | |
1066 | struct perf_event_attr *attr) | |
1067 | { | |
1068 | unsigned long config_base = 0; | |
1069 | ||
1070 | if (attr->exclude_idle) | |
1071 | return -EPERM; | |
1072 | if (attr->exclude_user) | |
1073 | config_base |= ARMV7_EXCLUDE_USER; | |
1074 | if (attr->exclude_kernel) | |
1075 | config_base |= ARMV7_EXCLUDE_PL1; | |
1076 | if (!attr->exclude_hv) | |
1077 | config_base |= ARMV7_INCLUDE_HYP; | |
1078 | ||
1079 | /* | |
1080 | * Install the filter into config_base as this is used to | |
1081 | * construct the event type. | |
1082 | */ | |
1083 | event->config_base = config_base; | |
1084 | ||
1085 | return 0; | |
43eab878 WD |
1086 | } |
1087 | ||
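From userspace this filter is driven by the standard exclude_* bits of perf_event_attr. A minimal sketch of an attribute setup that exercises the path above (the attr fields are the stock perf ABI; the comments map them onto the function's logic):

```c
#include <linux/perf_event.h>
#include <string.h>

static void setup_kernel_only_cycles(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->exclude_user = 1;	/* -> config_base |= ARMV7_EXCLUDE_USER */
	attr->exclude_hv = 1;	/* -> ARMV7_INCLUDE_HYP stays clear */
	/* exclude_idle would be rejected with -EPERM by this PMU */
}
```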
574b69cb WD |
1088 | static void armv7pmu_reset(void *info) |
1089 | { | |
ed6f2a52 | 1090 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; |
8d1a0ae7 MF |
1091 | u32 idx, nb_cnt = cpu_pmu->num_events, val; |
1092 | ||
1093 | if (cpu_pmu->secure_access) { | |
1094 | asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val)); | |
1095 | val |= ARMV7_SDER_SUNIDEN; | |
1096 | asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val)); | |
1097 | } | |
574b69cb WD |
1098 | |
1099 | /* The counter and interrupt enable registers are unknown at reset. */ | |
ed6f2a52 SH |
1100 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { |
1101 | armv7_pmnc_disable_counter(idx); | |
1102 | armv7_pmnc_disable_intens(idx); | |
1103 | } | |
574b69cb WD |
1104 | |
1105 | /* Initialize & Reset PMNC: C and P bits */ | |
1106 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | |
1107 | } | |
1108 | ||
e1f431b5 MR |
1109 | static int armv7_a8_map_event(struct perf_event *event) |
1110 | { | |
6dbc0029 | 1111 | return armpmu_map_event(event, &armv7_a8_perf_map, |
e1f431b5 MR |
1112 | &armv7_a8_perf_cache_map, 0xFF); |
1113 | } | |
1114 | ||
1115 | static int armv7_a9_map_event(struct perf_event *event) | |
1116 | { | |
6dbc0029 | 1117 | return armpmu_map_event(event, &armv7_a9_perf_map, |
e1f431b5 MR |
1118 | &armv7_a9_perf_cache_map, 0xFF); |
1119 | } | |
1120 | ||
1121 | static int armv7_a5_map_event(struct perf_event *event) | |
1122 | { | |
6dbc0029 | 1123 | return armpmu_map_event(event, &armv7_a5_perf_map, |
e1f431b5 MR |
1124 | &armv7_a5_perf_cache_map, 0xFF); |
1125 | } | |
1126 | ||
1127 | static int armv7_a15_map_event(struct perf_event *event) | |
1128 | { | |
6dbc0029 | 1129 | return armpmu_map_event(event, &armv7_a15_perf_map, |
e1f431b5 MR |
1130 | &armv7_a15_perf_cache_map, 0xFF); |
1131 | } | |
1132 | ||
d33c88c6 WD |
1133 | static int armv7_a7_map_event(struct perf_event *event) |
1134 | { | |
6dbc0029 | 1135 | return armpmu_map_event(event, &armv7_a7_perf_map, |
d33c88c6 WD |
1136 | &armv7_a7_perf_cache_map, 0xFF); |
1137 | } | |
1138 | ||
8e781f65 AT |
1139 | static int armv7_a12_map_event(struct perf_event *event) |
1140 | { | |
1141 | return armpmu_map_event(event, &armv7_a12_perf_map, | |
1142 | &armv7_a12_perf_cache_map, 0xFF); | |
1143 | } | |
1144 | ||
2a3391cd SB |
1145 | static int krait_map_event(struct perf_event *event) |
1146 | { | |
1147 | return armpmu_map_event(event, &krait_perf_map, | |
1148 | &krait_perf_cache_map, 0xFFFFF); | |
1149 | } | |
1150 | ||
1151 | static int krait_map_event_no_branch(struct perf_event *event) | |
1152 | { | |
1153 | return armpmu_map_event(event, &krait_perf_map_no_branch, | |
1154 | &krait_perf_cache_map, 0xFFFFF); | |
1155 | } | |
1156 | ||
341e42c4 SB |
1157 | static int scorpion_map_event(struct perf_event *event) |
1158 | { | |
1159 | return armpmu_map_event(event, &scorpion_perf_map, | |
1160 | &scorpion_perf_cache_map, 0xFFFFF); | |
1161 | } | |
1162 | ||
513c99ce SH |
1163 | static void armv7pmu_init(struct arm_pmu *cpu_pmu) |
1164 | { | |
1165 | cpu_pmu->handle_irq = armv7pmu_handle_irq; | |
1166 | cpu_pmu->enable = armv7pmu_enable_event; | |
1167 | cpu_pmu->disable = armv7pmu_disable_event; | |
1168 | cpu_pmu->read_counter = armv7pmu_read_counter; | |
1169 | cpu_pmu->write_counter = armv7pmu_write_counter; | |
1170 | cpu_pmu->get_event_idx = armv7pmu_get_event_idx; | |
1171 | cpu_pmu->start = armv7pmu_start; | |
1172 | cpu_pmu->stop = armv7pmu_stop; | |
1173 | cpu_pmu->reset = armv7pmu_reset; | |
1174 | cpu_pmu->max_period = (1LLU << 32) - 1; | |
43eab878 WD |
1175 | }; |
1176 | ||
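max_period is 2^32 - 1 because every counter handled here, including CCNT (the PMNC_D divider is left clear), is 32 bits wide; the generic arm_pmu code relies on this when it pre-loads each counter so that the overflow interrupt fires after the requested sampling period.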
0e3038d1 | 1177 | static void armv7_read_num_pmnc_events(void *info) |
43eab878 | 1178 | { |
0e3038d1 | 1179 | int *nb_cnt = info; |
43eab878 | 1180 | |
43eab878 | 1181 | /* Read the nb of CNTx counters supported from PMNC */ |
0e3038d1 | 1182 | *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; |
43eab878 | 1183 | |
0e3038d1 MR |
1184 | /* Add the CPU cycles counter */ |
1185 | *nb_cnt += 1; | |
1186 | } | |
1187 | ||
1188 | static int armv7_probe_num_events(struct arm_pmu *arm_pmu) | |
1189 | { | |
1190 | return smp_call_function_any(&arm_pmu->supported_cpus, | |
1191 | armv7_read_num_pmnc_events, | |
1192 | &arm_pmu->num_events, 1); | |
43eab878 WD |
1193 | } |

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a8";
        cpu_pmu->map_event = armv7_a8_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv1_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a9";
        cpu_pmu->map_event = armv7_a9_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv1_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a5";
        cpu_pmu->map_event = armv7_a5_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv1_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a15";
        cpu_pmu->map_event = armv7_a15_map_event;
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv2_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a7";
        cpu_pmu->map_event = armv7_a7_map_event;
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv2_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a12";
        cpu_pmu->map_event = armv7_a12_map_event;
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv2_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv7_a12_pmu_init(cpu_pmu);

        cpu_pmu->name = "armv7_cortex_a17";
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv7_pmuv2_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv7_pmu_format_attr_group;
        return ret;
}

/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */

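/*
 * Worked example (illustrative, not part of the original source): for the
 * encoding 0x12021 mentioned above, the macros decode as
 *
 *	EVENT_REGION(0x12021) == 2      (PMRESR2)
 *	EVENT_CODE(0x12021)   == 0x02   (event class)
 *	EVENT_GROUP(0x12021)  == 1      (group 1)
 *	EVENT_CPU(0x12021)    == true, EVENT_VENUM(0x12021) == false
 */
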
static u32 krait_read_pmresrn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
                break;
        case 1:
                asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
                break;
        case 2:
                asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
                break;
        default:
                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
        }

        return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
        switch (n) {
        case 0:
                asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
                break;
        case 1:
                asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
                break;
        case 2:
                asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
                break;
        default:
                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
        }
}

static u32 venum_read_pmresr(void)
{
        u32 val;

        asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
        return val;
}

static void venum_write_pmresr(u32 val)
{
        asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
        u32 venum_new_val;
        u32 fp_new_val;

        BUG_ON(preemptible());
        /* Enable CP10 and CP11 coprocessor access via CPACR */
        *venum_orig_val = get_copro_access();
        venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
        set_copro_access(venum_new_val);

        /* Enable FPEXC */
        *fp_orig_val = fmrx(FPEXC);
        fp_new_val = *fp_orig_val | FPEXC_EN;
        fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
        BUG_ON(preemptible());
        /* Restore FPEXC */
        fmxr(FPEXC, fp_orig_val);
        isb();
        /* Restore CPACR */
        set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
        static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
                                             KRAIT_PMRESR1_GROUP0,
                                             KRAIT_PMRESR2_GROUP0 };
        return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
        u32 val;
        u32 mask;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        unsigned int code = EVENT_CODE(config_base);
        unsigned int group_shift;
        bool venum_event = EVENT_VENUM(config_base);

        group_shift = group * 8;
        mask = 0xff << group_shift;

        /* Configure evtsel for the region and group */
        if (venum_event)
                val = KRAIT_VPMRESR0_GROUP0;
        else
                val = krait_get_pmresrn_event(region);
        val += group;
        /* Mix in mode-exclusion bits */
        val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
        armv7_pmnc_write_evtsel(idx, val);

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = krait_read_pmresrn(region);
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                krait_write_pmresrn(region, val);
        }
}

static u32 clear_pmresrn_group(u32 val, int group)
{
        u32 mask;
        int group_shift;

        group_shift = group * 8;
        mask = 0xff << group_shift;
        val &= ~mask;

        /* Don't clear the enable bit unless the entire region is disabled */
        if (val & ~PMRESRn_EN)
                return val | PMRESRn_EN;

        return 0;
}
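
/*
 * Illustrative example (not part of the original source, assuming group 0
 * holds a non-zero event code): with groups 0 and 1 programmed, clearing
 * group 1 leaves group 0's code in place, so EN is kept set; clearing the
 * last remaining group returns 0 and the whole region is disabled.
 */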

static void krait_clearpmu(u32 config_base)
{
        u32 val;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        bool venum_event = EVENT_VENUM(config_base);

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val = clear_pmresrn_group(val, group);
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = krait_read_pmresrn(region);
                val = clear_pmresrn_group(val, group);
                krait_write_pmresrn(region, val);
        }
}

static void krait_pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /* Disable counter and interrupt */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /* Clear pmresr code (if destined for PMNx counters) */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                krait_clearpmu(hwc->config_base);

        /* Disable interrupt for this counter */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We set the event for the cycle counter because we
         * have the ability to perform event filtering.
         */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                krait_evt_setup(idx, hwc->config_base);
        else
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /* Enable interrupt for this counter */
        armv7_pmnc_enable_intens(idx);

        /* Enable counter */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
        u32 vval, fval;
        struct arm_pmu *cpu_pmu = info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        armv7pmu_reset(info);

        /* Clear all pmresrs */
        krait_write_pmresrn(0, 0);
        krait_write_pmresrn(1, 0);
        krait_write_pmresrn(2, 0);

        venum_pre_pmresr(&vval, &fval);
        venum_write_pmresr(0);
        venum_post_pmresr(vval, fval);

        /* Reset PMxEVCNTCR to a sane default */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv7_pmnc_select_counter(idx);
                asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
        }
}

static int krait_event_to_bit(struct perf_event *event, unsigned int region,
                              unsigned int group)
{
        int bit;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

        if (hwc->config_base & VENUM_EVENT)
                bit = KRAIT_VPMRESR0_GROUP0;
        else
                bit = krait_get_pmresrn_event(region);
        bit -= krait_get_pmresrn_event(0);
        bit += group;
        /*
         * Lower bits are reserved for use by the counters (see
         * armv7pmu_get_event_idx() for more info)
         */
        bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

        return bit;
}
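
/*
 * Illustrative example (not part of the original source, assuming the
 * KRAIT_*_GROUP0 selectors are numbered four apart, one slot per group in
 * each region): a Krait CPU event in region 1, group 2 lands at bit
 * (4 * 1) + 2 above the last hardware counter index, so two events that
 * share a region/group pair collide on the same used_mask bit and the
 * second one is rejected.
 */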

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a PMRESR register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                   struct perf_event *event)
{
        int idx;
        int bit = -1;
        struct hw_perf_event *hwc = &event->hw;
        unsigned int region = EVENT_REGION(hwc->config_base);
        unsigned int code = EVENT_CODE(hwc->config_base);
        unsigned int group = EVENT_GROUP(hwc->config_base);
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool krait_event = EVENT_CPU(hwc->config_base);

        if (venum_event || krait_event) {
                /* Ignore invalid events */
                if (group > 3 || region > 2)
                        return -EINVAL;
                if (venum_event && (code & 0xe0))
                        return -EINVAL;

                bit = krait_event_to_bit(event, region, group);
                if (test_and_set_bit(bit, cpuc->used_mask))
                        return -EAGAIN;
        }

        idx = armv7pmu_get_event_idx(cpuc, event);
        if (idx < 0 && bit >= 0)
                clear_bit(bit, cpuc->used_mask);

        return idx;
}

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                      struct perf_event *event)
{
        int bit;
        struct hw_perf_event *hwc = &event->hw;
        unsigned int region = EVENT_REGION(hwc->config_base);
        unsigned int group = EVENT_GROUP(hwc->config_base);
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool krait_event = EVENT_CPU(hwc->config_base);

        if (venum_event || krait_event) {
                bit = krait_event_to_bit(event, region, group);
                clear_bit(bit, cpuc->used_mask);
        }
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_krait";
        /* Some early versions of Krait don't support PC write events */
        if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
                                  "qcom,no-pc-write"))
                cpu_pmu->map_event = krait_map_event_no_branch;
        else
                cpu_pmu->map_event = krait_map_event;
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        cpu_pmu->reset = krait_pmu_reset;
        cpu_pmu->enable = krait_pmu_enable_event;
        cpu_pmu->disable = krait_pmu_disable_event;
        cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
        cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
        return armv7_probe_num_events(cpu_pmu);
}
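
/*
 * Usage sketch (illustrative, with a hypothetical invocation): because
 * krait_map_event() passes raw codes through the 0xFFFFF mask, the
 * 0x12021 encoding described above can be requested from userspace as a
 * raw hardware event, e.g.:
 *
 *	perf stat -a -e r12021 sleep 1
 *
 * assuming perf resolves the r-prefixed raw event against this PMU.
 */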

/*
 * Scorpion Local Performance Monitor Register (LPMn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
 *            +--------------------------------+
 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */

static u32 scorpion_read_pmresrn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
                break;
        case 1:
                asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
                break;
        case 2:
                asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
                break;
        case 3:
                asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
                break;
        default:
                BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
        }

        return val;
}

static void scorpion_write_pmresrn(int n, u32 val)
{
        switch (n) {
        case 0:
                asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
                break;
        case 1:
                asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
                break;
        case 2:
                asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
                break;
        case 3:
                asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
                break;
        default:
                BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
        }
}

static u32 scorpion_get_pmresrn_event(unsigned int region)
{
        static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
                                             SCORPION_LPM1_GROUP0,
                                             SCORPION_LPM2_GROUP0,
                                             SCORPION_L2LPM_GROUP0 };
        return pmresrn_table[region];
}

static void scorpion_evt_setup(int idx, u32 config_base)
{
        u32 val;
        u32 mask;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        unsigned int code = EVENT_CODE(config_base);
        unsigned int group_shift;
        bool venum_event = EVENT_VENUM(config_base);

        group_shift = group * 8;
        mask = 0xff << group_shift;

        /* Configure evtsel for the region and group */
        if (venum_event)
                val = SCORPION_VLPM_GROUP0;
        else
                val = scorpion_get_pmresrn_event(region);
        val += group;
        /* Mix in mode-exclusion bits */
        val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
        armv7_pmnc_write_evtsel(idx, val);

        asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = scorpion_read_pmresrn(region);
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                scorpion_write_pmresrn(region, val);
        }
}

static void scorpion_clearpmu(u32 config_base)
{
        u32 val;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        bool venum_event = EVENT_VENUM(config_base);

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val = clear_pmresrn_group(val, group);
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = scorpion_read_pmresrn(region);
                val = clear_pmresrn_group(val, group);
                scorpion_write_pmresrn(region, val);
        }
}

static void scorpion_pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /* Disable counter and interrupt */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /* Clear pmresr code (if destined for PMNx counters) */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                scorpion_clearpmu(hwc->config_base);

        /* Disable interrupt for this counter */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We don't set the event for the cycle counter because we
         * don't have the ability to perform event filtering.
         */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                scorpion_evt_setup(idx, hwc->config_base);
        else if (idx != ARMV7_IDX_CYCLE_COUNTER)
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /* Enable interrupt for this counter */
        armv7_pmnc_enable_intens(idx);

        /* Enable counter */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_reset(void *info)
{
        u32 vval, fval;
        struct arm_pmu *cpu_pmu = info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        armv7pmu_reset(info);

        /* Clear all pmresrs */
        scorpion_write_pmresrn(0, 0);
        scorpion_write_pmresrn(1, 0);
        scorpion_write_pmresrn(2, 0);
        scorpion_write_pmresrn(3, 0);

        venum_pre_pmresr(&vval, &fval);
        venum_write_pmresr(0);
        venum_post_pmresr(vval, fval);

        /* Reset PMxEVCNTCR to a sane default */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv7_pmnc_select_counter(idx);
                asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
        }
}

static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
                                 unsigned int group)
{
        int bit;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

        if (hwc->config_base & VENUM_EVENT)
                bit = SCORPION_VLPM_GROUP0;
        else
                bit = scorpion_get_pmresrn_event(region);
        bit -= scorpion_get_pmresrn_event(0);
        bit += group;
        /*
         * Lower bits are reserved for use by the counters (see
         * armv7pmu_get_event_idx() for more info)
         */
        bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

        return bit;
}

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a PMRESR register.
 */
static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                      struct perf_event *event)
{
        int idx;
        int bit = -1;
        struct hw_perf_event *hwc = &event->hw;
        unsigned int region = EVENT_REGION(hwc->config_base);
        unsigned int group = EVENT_GROUP(hwc->config_base);
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool scorpion_event = EVENT_CPU(hwc->config_base);

        if (venum_event || scorpion_event) {
                /* Ignore invalid events */
                if (group > 3 || region > 3)
                        return -EINVAL;

                bit = scorpion_event_to_bit(event, region, group);
                if (test_and_set_bit(bit, cpuc->used_mask))
                        return -EAGAIN;
        }

        idx = armv7pmu_get_event_idx(cpuc, event);
        if (idx < 0 && bit >= 0)
                clear_bit(bit, cpuc->used_mask);

        return idx;
}

static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                         struct perf_event *event)
{
        int bit;
        struct hw_perf_event *hwc = &event->hw;
        unsigned int region = EVENT_REGION(hwc->config_base);
        unsigned int group = EVENT_GROUP(hwc->config_base);
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool scorpion_event = EVENT_CPU(hwc->config_base);

        if (venum_event || scorpion_event) {
                bit = scorpion_event_to_bit(event, region, group);
                clear_bit(bit, cpuc->used_mask);
        }
}

static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_scorpion";
        cpu_pmu->map_event = scorpion_map_event;
        cpu_pmu->reset = scorpion_pmu_reset;
        cpu_pmu->enable = scorpion_pmu_enable_event;
        cpu_pmu->disable = scorpion_pmu_disable_event;
        cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
        cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
        return armv7_probe_num_events(cpu_pmu);
}

static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_scorpion_mp";
        cpu_pmu->map_event = scorpion_map_event;
        cpu_pmu->reset = scorpion_pmu_reset;
        cpu_pmu->enable = scorpion_pmu_enable_event;
        cpu_pmu->disable = scorpion_pmu_disable_event;
        cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
        cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
        return armv7_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv7_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
        {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
        {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
        {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
        {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
        {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
        {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
        {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
        {},
};
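
/*
 * Illustrative device-tree fragment (hypothetical, not part of this file):
 * a Krait board would bind against the table above with a node such as
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <1 7 0xf04>;
 *	};
 *
 * where the interrupt specifier is board-specific.
 */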

static const struct pmu_probe_info armv7_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
        { /* sentinel value */ }
};

static int armv7_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
                                    armv7_pmu_probe_table);
}

static struct platform_driver armv7_pmu_driver = {
        .driver = {
                .name = "armv7-pmu",
                .of_match_table = armv7_pmu_of_device_ids,
        },
        .probe = armv7_pmu_device_probe,
};

builtin_platform_driver(armv7_pmu_driver);
#endif /* CONFIG_CPU_V7 */