/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>


/*
 * Some power8 event codes.
 */
#define PM_CYC				0x0001e
#define PM_GCT_NOSLOT_CYC		0x100f8
#define PM_CMPLU_STALL			0x4000a
#define PM_INST_CMPL			0x00002
#define PM_BRU_FIN			0x10068
#define PM_BR_MPRED_CMPL		0x400f6

/* All L1 D cache load references counted at finish, gated by reject */
#define PM_LD_REF_L1			0x100ee
/* Load Missed L1 */
#define PM_LD_MISS_L1			0x3e054
/* Store Missed L1 */
#define PM_ST_MISS_L1			0x300f0
/* L1 cache data prefetches */
#define PM_L1_PREF			0x0d8b8
/* Instruction fetches from L1 */
#define PM_INST_FROM_L1			0x04080
/* Demand iCache Miss */
#define PM_L1_ICACHE_MISS		0x200fd
/* Instruction Demand sectors written into IL1 */
#define PM_L1_DEMAND_WRITE		0x0408c
/* Instruction prefetch written into IL1 */
#define PM_IC_PREF_WRITE		0x0408e
/* The data cache was reloaded from local core's L3 due to a demand load */
#define PM_DATA_FROM_L3			0x4c042
/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
#define PM_DATA_FROM_L3MISS		0x300fe
/* All successful D-side store dispatches for this thread */
#define PM_L2_ST			0x17080
/* All successful D-side store dispatches for this thread that were L2 Miss */
#define PM_L2_ST_MISS			0x17082
/* Total HW L3 prefetches (load + store) */
#define PM_L3_PREF_ALL			0x4e052
/* Data PTEG reload */
#define PM_DTLB_MISS			0x300fc
/* ITLB reloaded */
#define PM_ITLB_MISS			0x400fc


/*
 * Raw event encoding for POWER8:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   |                                 [      thresh_cmp     ]   [  thresh_ctl   ]
 *   |                                                                   |
 *   *- EBB (Linux)                      thresh start/stop OR FAB match -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                           |     |
 *     |        |           |                           |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl	(FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl	(FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl	(THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 */

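/*
 * Worked example (illustrative, not part of the original commentary): using
 * the field layout above, PM_L2_ST (0x17080) decodes as
 *
 *	pmcxsel =  0x17080        & 0xff = 0x80
 *	unit    = (0x17080 >> 12) & 0xf  = 0x7	(an L2 unit)
 *	pmc     = (0x17080 >> 16) & 0xf  = 0x1
 *
 * with the mark, combine, cache, sample and threshold fields all zero.
 */
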
#define EVENT_EBB_MASK		1ull
#define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
#define EVENT_THR_CMP_MASK	0x3ff
#define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
#define EVENT_THR_CTL_MASK	0xffull
#define EVENT_THR_SEL_SHIFT	29	/* Threshold select value */
#define EVENT_THR_SEL_MASK	0x7
#define EVENT_THRESH_SHIFT	29	/* All threshold bits */
#define EVENT_THRESH_MASK	0x1fffffull
#define EVENT_SAMPLE_SHIFT	24	/* Sampling mode & eligibility */
#define EVENT_SAMPLE_MASK	0x1f
#define EVENT_CACHE_SEL_SHIFT	20	/* L2/L3 cache select */
#define EVENT_CACHE_SEL_MASK	0xf
#define EVENT_IS_L1		(4 << EVENT_CACHE_SEL_SHIFT)
#define EVENT_PMC_SHIFT		16	/* PMC number (1-based) */
#define EVENT_PMC_MASK		0xf
#define EVENT_UNIT_SHIFT	12	/* Unit */
#define EVENT_UNIT_MASK		0xf
#define EVENT_COMBINE_SHIFT	11	/* Combine bit */
#define EVENT_COMBINE_MASK	0x1
#define EVENT_MARKED_SHIFT	8	/* Marked bit */
#define EVENT_MARKED_MASK	0x1
#define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */

#define EVENT_VALID_MASK	\
	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	|	\
	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		|	\
	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
	 (EVENT_EBB_MASK       << PERF_EVENT_CONFIG_EBB_SHIFT)	|	\
	  EVENT_PSEL_MASK)

/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1		0x0000000040000000UL
#define POWER8_MMCRA_IFM2		0x0000000080000000UL
#define POWER8_MMCRA_IFM3		0x00000000C0000000UL

#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Layout of constraint bits:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   fab_match   ]         [       thresh_cmp      ] [  thresh_ctl   ] [   ]
 *                                                                           |
 *                                                               thresh_sel -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                |  [ ] [  sample ]   [     ]   [6] [5]   [4] [3]   [2] [1]
 *         EBB -*     |                   |
 *                    |                   |      Count of events for each PMC.
 * L1 I/D qualifier -*                    |        p1, p2, p3, p4, p5, p6.
 *                     nc - number of counters -*
 *
 * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
 * we want the low bit of each field to be added to any existing value.
 *
 * Everything else is a value field.
 */

#define CNST_FAB_MATCH_VAL(v)	(((v) & EVENT_THR_CTL_MASK) << 56)
#define CNST_FAB_MATCH_MASK	CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)

/* We just throw all the threshold bits into the constraint */
#define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)

#define CNST_EBB_VAL(v)		(((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK		CNST_EBB_VAL(EVENT_EBB_MASK)

#define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
#define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)

#define CNST_SAMPLE_VAL(v)	(((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK	CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)

/*
 * For NC we are counting up to 4 events. This requires three bits, and we need
 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
 * fields by 3 in test_adder.
 */
#define CNST_NC_SHIFT		12
#define CNST_NC_VAL		(1 << CNST_NC_SHIFT)
#define CNST_NC_MASK		(8 << CNST_NC_SHIFT)
#define POWER8_TEST_ADDER	(3 << CNST_NC_SHIFT)

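/*
 * Worked example (illustrative): four events contribute an NC field sum of 4;
 * adding the bias of 3 from POWER8_TEST_ADDER gives 7, which still fits in
 * the low three bits. A fifth event would make the sum 8, setting the bit
 * tested by CNST_NC_MASK and flagging the over-commit.
 */
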
/*
 * For the per-PMC fields we have two bits. The low bit is added, so if two
 * events ask for the same PMC the sum will overflow, setting the high bit,
 * indicating an error. So our mask sets the high bit.
 */
#define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
#define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
#define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))

/* Our add_fields is defined as: */
#define POWER8_ADD_FIELDS	\
	CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
	CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL

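/*
 * Worked example (illustrative): two events that both request PMC1 each
 * contribute CNST_PMC_VAL(1) == 1, so the field sums to 2, which is exactly
 * the bit set by CNST_PMC_MASK(1) - the conflict shows up when the masks
 * are checked.
 */
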

/* Bits in MMCR1 for POWER8 */
#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
#define MMCR1_FAB_SHIFT			36
#define MMCR1_DC_QUAL_SHIFT		47
#define MMCR1_IC_QUAL_SHIFT		46

/* Bits in MMCRA for POWER8 */
#define MMCRA_SAMP_MODE_SHIFT		1
#define MMCRA_SAMP_ELIG_SHIFT		4
#define MMCRA_THR_CTL_SHIFT		8
#define MMCRA_THR_SEL_SHIFT		16
#define MMCRA_THR_CMP_SHIFT		32
#define MMCRA_SDAR_MODE_TLB		(1ull << 42)


static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}
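
/*
 * Illustrative decode of the two magic values above: 0x30056 is
 * PM_MRK_FAB_RSP_MATCH (pmc = 3, unit = 0, pmcxsel = 0x56) and 0x4f052 is
 * PM_MRK_FAB_RSP_MATCH_CYC (pmc = 4, unit = 0xf, pmcxsel = 0x52), matching
 * the two special cases in the raw event encoding comment above.
 */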

static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;

	/* Clear the EBB bit in the event, so event checks work below */
	event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);

	if (pmc) {
		if (pmc > 6)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);

		if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
			return -1;
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero.
		 */
		if (cache)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_MASK;
	value |= CNST_EBB_VAL(ebb);

	*maskp = mask;
	*valp = value;

	return 0;
}

static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val &  3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;

	return 0;
}

#define MAX_ALT	2

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x10134, 0x301e2 },		/* PM_MRK_ST_CMPL */
	{ 0x10138, 0x40138 },		/* PM_BR_MRK_2PATH */
	{ 0x18082, 0x3e05e },		/* PM_L3_CO_MEPF */
	{ 0x1d14e, 0x401e8 },		/* PM_MRK_DATA_FROM_L2MISS */
	{ 0x1e054, 0x4000a },		/* PM_CMPLU_STALL */
	{ 0x20036, 0x40036 },		/* PM_BR_2PATH */
	{ 0x200f2, 0x300f2 },		/* PM_INST_DISP */
	{ 0x200f4, 0x600f4 },		/* PM_RUN_CYC */
	{ 0x2013c, 0x3012e },		/* PM_MRK_FILT_MATCH */
	{ 0x3e054, 0x400f0 },		/* PM_LD_MISS_L1 */
	{ 0x400fa, 0x500fa },		/* PM_RUN_INST_CMPL */
};

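/*
 * For example, PM_CMPLU_STALL appears above as the pair { 0x1e054, 0x4000a }:
 * the same event encoded for PMC1 or PMC4. Listing both lets the event
 * scheduler move it to whichever of those counters is free.
 */
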
/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;

		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}

	return -1;
}

static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;

	i = find_alternative(event);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = event_alternatives[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:	/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:	/* PM_RUN_CYC */
				alt[j++] = 0x1e;
				break;
			case 0x2:	/* PM_PPC_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:	/* PM_RUN_INST_CMPL */
				alt[j++] = 0x2;	/* PM_PPC_CMPL */
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}
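
/*
 * For example (illustrative): with PPMU_ONLY_COUNT_RUN set, a request for
 * PM_CYC (0x1e) returns the alternatives { 0x1e, 0x600f4 }, allowing the
 * core code to substitute PM_RUN_CYC when only run-state cycles are being
 * counted.
 */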

static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");

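/*
 * These format strings are exported to userspace via sysfs (typically under
 * /sys/bus/event_source/devices/cpu/format/). As an illustrative example,
 * the same event can then be requested either as a raw code or by naming
 * the fields:
 *
 *	perf stat -e r100f8 ...
 *	perf stat -e cpu/event=0x100f8/ ...
 */
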
static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

struct attribute_group power8_pmu_format_group = {
	.name = "format",
	.attrs = power8_pmu_format_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	NULL,
};

static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};

static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
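
/*
 * Illustrative mapping: a "perf record -j any_call ..." request sets
 * PERF_SAMPLE_BRANCH_ANY_CALL, which the filter above translates to
 * POWER8_MMCRA_IFM1. "-j any" needs no hardware filter at all, while modes
 * the BHRB cannot filter (e.g. any_ret, ind_call) are rejected with -1.
 */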

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C

static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static int __init init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}
early_initcall(init_power8_pmu);