powerpc/perf: Factor out event_alternative function
[mirror_ubuntu-zesty-kernel.git] / arch/powerpc/perf/power8-pmu.c
/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "power8-pmu: " fmt

#include "isa207-common.h"

/*
 * Some power8 event codes.
 *
 * The event names and raw codes come from power8-events-list.h; each
 * EVENT() entry in that header is expanded by the macro below into an
 * enum constant.
 */
#define EVENT(_name, _code) _name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT

/*
 * MMCRA IFM bits - POWER8
 *
 * These values select the BHRB instruction filtering mode; the chosen
 * value is OR-ed into MMCRA by power8_config_bhrb() below.
 */
#define POWER8_MMCRA_IFM1		0x0000000040000000UL
#define POWER8_MMCRA_IFM2		0x0000000080000000UL
#define POWER8_MMCRA_IFM3		0x00000000C0000000UL

/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
        { PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
        { PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
        { PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
        { PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
        { PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
        { PM_BR_2PATH,			PM_BR_2PATH_ALT },
        { PM_INST_DISP,			PM_INST_DISP_ALT },
        { PM_RUN_CYC_ALT,		PM_RUN_CYC },
        { PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
        { PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
        { PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};

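/*
 * Build the list of alternative event codes for 'event'.  The fixed
 * pairs come from the table above (via isa207_get_alternatives()); when
 * we are only counting in run state, the PM_RUN_* variants are also
 * treated as interchangeable with their non-RUN counterparts.
 */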
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
        int i, j, num_alt = 0;

        num_alt = isa207_get_alternatives(event, alt, event_alternatives,
                                          (int)ARRAY_SIZE(event_alternatives));
        if (flags & PPMU_ONLY_COUNT_RUN) {
                /*
                 * We're only counting in RUN state, so PM_CYC is equivalent
                 * to PM_RUN_CYC and PM_INST_CMPL is equivalent to
                 * PM_RUN_INST_CMPL.
                 */
                j = num_alt;
                for (i = 0; i < num_alt; ++i) {
                        switch (alt[i]) {
                        case PM_CYC:
                                alt[j++] = PM_RUN_CYC;
                                break;
                        case PM_RUN_CYC:
                                alt[j++] = PM_CYC;
                                break;
                        case PM_INST_CMPL:
                                alt[j++] = PM_RUN_INST_CMPL;
                                break;
                        case PM_RUN_INST_CMPL:
                                alt[j++] = PM_INST_CMPL;
                                break;
                        }
                }
                num_alt = j;
        }

        return num_alt;
}

GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

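/*
 * The attribute arrays below expose the symbolic event names above to
 * userspace via sysfs, so tools such as perf can refer to events by
 * name rather than by raw code.
 */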
static struct attribute *power8_events_attr[] = {
        GENERIC_EVENT_PTR(PM_CYC),
        GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
        GENERIC_EVENT_PTR(PM_CMPLU_STALL),
        GENERIC_EVENT_PTR(PM_INST_CMPL),
        GENERIC_EVENT_PTR(PM_BRU_FIN),
        GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
        GENERIC_EVENT_PTR(PM_LD_REF_L1),
        GENERIC_EVENT_PTR(PM_LD_MISS_L1),

        CACHE_EVENT_PTR(PM_LD_MISS_L1),
        CACHE_EVENT_PTR(PM_LD_REF_L1),
        CACHE_EVENT_PTR(PM_L1_PREF),
        CACHE_EVENT_PTR(PM_ST_MISS_L1),
        CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
        CACHE_EVENT_PTR(PM_INST_FROM_L1),
        CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3),
        CACHE_EVENT_PTR(PM_L3_PREF_ALL),
        CACHE_EVENT_PTR(PM_L2_ST_MISS),
        CACHE_EVENT_PTR(PM_L2_ST),

        CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
        CACHE_EVENT_PTR(PM_BRU_FIN),

        CACHE_EVENT_PTR(PM_DTLB_MISS),
        CACHE_EVENT_PTR(PM_ITLB_MISS),
        NULL
};

static struct attribute_group power8_pmu_events_group = {
        .name = "events",
        .attrs = power8_events_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
        &isa207_pmu_format_group,
        &power8_pmu_events_group,
        NULL,
};

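/*
 * Map the generic perf hardware events onto raw POWER8 event codes.
 */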
static int power8_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
        [PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
        [PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
        [PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
        [PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};

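/*
 * Translate a perf branch_sample_type into the MMCRA IFM bits used to
 * filter which branches the BHRB records.  Returns -1 for filter
 * combinations the hardware cannot provide.
 */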
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
        u64 pmu_bhrb_filter = 0;

        /*
         * BHRB and regular PMU events share the same privilege state
         * filter configuration. BHRB is always recorded along with a
         * regular PMU event. As the privilege state filter is handled
         * in the basic PMC configuration of the accompanying regular
         * PMU event, we ignore any separate BHRB specific request.
         */

        /* No branch filter requested */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
                return pmu_bhrb_filter;

        /* Invalid branch filter options - HW does not support */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
                pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
                return pmu_bhrb_filter;
        }

        /* Everything else is unsupported */
        return -1;
}

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
        /* Enable BHRB filter in PMU */
        mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [ C(L1D) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
                        [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_PREF,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(L1I) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
                        [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(LL) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
                        [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L2_ST,
                        [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(DTLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_DTLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(ITLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ITLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(BPU) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
                        [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(NODE) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
};

#undef C

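/*
 * POWER8 PMU description handed to the core powerpc perf code.  The
 * constraint and MMCR computation callbacks are the shared ISA v2.07
 * (isa207_*) helpers.
 */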
static struct power_pmu power8_pmu = {
        .name			= "POWER8",
        .n_counter		= MAX_PMU_COUNTERS,
        .max_alternatives	= MAX_ALT + 1,
        .add_fields		= ISA207_ADD_FIELDS,
        .test_adder		= ISA207_TEST_ADDER,
        .compute_mmcr		= isa207_compute_mmcr,
        .config_bhrb		= power8_config_bhrb,
        .bhrb_filter_map	= power8_bhrb_filter_map,
        .get_constraint		= isa207_get_constraint,
        .get_alternatives	= power8_get_alternatives,
        .disable_pmc		= isa207_disable_pmc,
        .flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic		= ARRAY_SIZE(power8_generic_events),
        .generic_events		= power8_generic_events,
        .cache_events		= &power8_cache_events,
        .attr_groups		= power8_pmu_attr_groups,
        .bhrb_nr		= 32,
};

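/*
 * Register the POWER8 PMU at boot, but only when the CPU spec reports
 * that we are actually running on a POWER8.
 */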
static int __init init_power8_pmu(void)
{
        int rc;

        if (!cur_cpu_spec->oprofile_cpu_type ||
            strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
                return -ENODEV;

        rc = register_power_pmu(&power8_pmu);
        if (rc)
                return rc;

        /* Tell userspace that EBB is supported */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

        if (cpu_has_feature(CPU_FTR_PMAO_BUG))
                pr_info("PMAO restore workaround active.\n");

        return 0;
}
early_initcall(init_power8_pmu);