/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
14 #define pr_fmt(fmt) "power9-pmu: " fmt
16 #include "isa207-common.h"
/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |                                 [ ] [ ]      [ thresh_cmp ]   [ thresh_ctl ]
 *   | |  *- IFM (Linux)                  |            thresh start/stop -*
 *   | *- BHRB (Linux)                    *sm
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []    m   [    pmcxsel    ]
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        *- sampling mode for marked events      *- combine
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y]    = unit     (PMCxUNIT)
 * MMCR1[24]     = pmc1combine[0]
 * MMCR1[25]     = pmc1combine[1]
 * MMCR1[26]     = pmc2combine[0]
 * MMCR1[27]     = pmc2combine[1]
 * MMCR1[28]     = pmc3combine[0]
 * MMCR1[29]     = pmc3combine[1]
 * MMCR1[30]     = pmc4combine[0]
 * MMCR1[31]     = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 *
 * MMCRA[48:55]  = thresh_ctl   (THRESH START/END)
 *
 * MMCRA[45:47]  = thresh_sel
 *
 * MMCRA[9:11]   = thresh_cmp[0:2]
 * MMCRA[12:18]  = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * MMCRA[63]     = 1		(SAMPLE_ENABLE)
 * MMCRA[57:59]  = sample[0:2]	(RAND_SAMP_ELIG)
 * MMCRA[61:62]  = sample[3:4]	(RAND_SAMP_MODE)
 *
 * MMCRA[SDAR_MODE] = sm
 */
89 * Some power9 event codes.
91 #define EVENT(_name, _code) _name = _code,
94 #include "power9-events-list.h"
/* MMCRA IFM bits - POWER9 (BHRB instruction-filtering mode field) */
/* IFM1 selects call-type branches — used for PERF_SAMPLE_BRANCH_ANY_CALL below */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
/* IFM2/IFM3 are defined for completeness; not referenced in this file */
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL

/* PowerISA v2.07 format attribute structure, shared via isa207-common */
extern struct attribute_group isa207_pmu_format_group;
/*
 * sysfs event attributes: map the generic perf hardware event names
 * ("cpu-cycles", "instructions", ...) to POWER9 raw event codes.
 */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_CMPL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1_FIN);

/*
 * sysfs cache event attributes: generalized cache event names mapped to
 * the POWER9 events used in power9_cache_events below.
 */
CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
133 static struct attribute
*power9_events_attr
[] = {
134 GENERIC_EVENT_PTR(PM_CYC
),
135 GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC
),
136 GENERIC_EVENT_PTR(PM_CMPLU_STALL
),
137 GENERIC_EVENT_PTR(PM_INST_CMPL
),
138 GENERIC_EVENT_PTR(PM_BRU_CMPL
),
139 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL
),
140 GENERIC_EVENT_PTR(PM_LD_REF_L1
),
141 GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN
),
142 CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN
),
143 CACHE_EVENT_PTR(PM_LD_REF_L1
),
144 CACHE_EVENT_PTR(PM_L1_PREF
),
145 CACHE_EVENT_PTR(PM_ST_MISS_L1
),
146 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS
),
147 CACHE_EVENT_PTR(PM_INST_FROM_L1
),
148 CACHE_EVENT_PTR(PM_IC_PREF_WRITE
),
149 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS
),
150 CACHE_EVENT_PTR(PM_DATA_FROM_L3
),
151 CACHE_EVENT_PTR(PM_L3_PREF_ALL
),
152 CACHE_EVENT_PTR(PM_L2_ST_MISS
),
153 CACHE_EVENT_PTR(PM_L2_ST
),
154 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL
),
155 CACHE_EVENT_PTR(PM_BRU_CMPL
),
156 CACHE_EVENT_PTR(PM_DTLB_MISS
),
157 CACHE_EVENT_PTR(PM_ITLB_MISS
),
161 static struct attribute_group power9_pmu_events_group
= {
163 .attrs
= power9_events_attr
,
166 static const struct attribute_group
*power9_isa207_pmu_attr_groups
[] = {
167 &isa207_pmu_format_group
,
168 &power9_pmu_events_group
,
/*
 * sysfs "format" attributes: bit-field layout of the raw event config
 * word, matching the encoding diagram at the top of this file.
 */
PMU_FORMAT_ATTR(event,		"config:0-51");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:10-11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");
186 static struct attribute
*power9_pmu_format_attr
[] = {
187 &format_attr_event
.attr
,
188 &format_attr_pmcxsel
.attr
,
189 &format_attr_mark
.attr
,
190 &format_attr_combine
.attr
,
191 &format_attr_unit
.attr
,
192 &format_attr_pmc
.attr
,
193 &format_attr_cache_sel
.attr
,
194 &format_attr_sample_mode
.attr
,
195 &format_attr_thresh_sel
.attr
,
196 &format_attr_thresh_stop
.attr
,
197 &format_attr_thresh_start
.attr
,
198 &format_attr_thresh_cmp
.attr
,
199 &format_attr_sdar_mode
.attr
,
203 static struct attribute_group power9_pmu_format_group
= {
205 .attrs
= power9_pmu_format_attr
,
208 static const struct attribute_group
*power9_pmu_attr_groups
[] = {
209 &power9_pmu_format_group
,
210 &power9_pmu_events_group
,
214 static int power9_generic_events
[] = {
215 [PERF_COUNT_HW_CPU_CYCLES
] = PM_CYC
,
216 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] = PM_ICT_NOSLOT_CYC
,
217 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] = PM_CMPLU_STALL
,
218 [PERF_COUNT_HW_INSTRUCTIONS
] = PM_INST_CMPL
,
219 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = PM_BRU_CMPL
,
220 [PERF_COUNT_HW_BRANCH_MISSES
] = PM_BR_MPRED_CMPL
,
221 [PERF_COUNT_HW_CACHE_REFERENCES
] = PM_LD_REF_L1
,
222 [PERF_COUNT_HW_CACHE_MISSES
] = PM_LD_MISS_L1_FIN
,
225 static u64
power9_bhrb_filter_map(u64 branch_sample_type
)
227 u64 pmu_bhrb_filter
= 0;
229 /* BHRB and regular PMU events share the same privilege state
230 * filter configuration. BHRB is always recorded along with a
231 * regular PMU event. As the privilege state filter is handled
232 * in the basic PMC configuration of the accompanying regular
233 * PMU event, we ignore any separate BHRB specific request.
236 /* No branch filter requested */
237 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY
)
238 return pmu_bhrb_filter
;
240 /* Invalid branch filter options - HW does not support */
241 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY_RETURN
)
244 if (branch_sample_type
& PERF_SAMPLE_BRANCH_IND_CALL
)
247 if (branch_sample_type
& PERF_SAMPLE_BRANCH_CALL
)
250 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY_CALL
) {
251 pmu_bhrb_filter
|= POWER9_MMCRA_IFM1
;
252 return pmu_bhrb_filter
;
255 /* Every thing else is unsupported */
259 static void power9_config_bhrb(u64 pmu_bhrb_filter
)
261 /* Enable BHRB filter in PMU */
262 mtspr(SPRN_MMCRA
, (mfspr(SPRN_MMCRA
) | pmu_bhrb_filter
));
265 #define C(x) PERF_COUNT_HW_CACHE_##x
268 * Table of generalized cache-related events.
269 * 0 means not supported, -1 means nonsensical, other values
272 static int power9_cache_events
[C(MAX
)][C(OP_MAX
)][C(RESULT_MAX
)] = {
275 [ C(RESULT_ACCESS
) ] = PM_LD_REF_L1
,
276 [ C(RESULT_MISS
) ] = PM_LD_MISS_L1_FIN
,
279 [ C(RESULT_ACCESS
) ] = 0,
280 [ C(RESULT_MISS
) ] = PM_ST_MISS_L1
,
282 [ C(OP_PREFETCH
) ] = {
283 [ C(RESULT_ACCESS
) ] = PM_L1_PREF
,
284 [ C(RESULT_MISS
) ] = 0,
289 [ C(RESULT_ACCESS
) ] = PM_INST_FROM_L1
,
290 [ C(RESULT_MISS
) ] = PM_L1_ICACHE_MISS
,
293 [ C(RESULT_ACCESS
) ] = PM_L1_DEMAND_WRITE
,
294 [ C(RESULT_MISS
) ] = -1,
296 [ C(OP_PREFETCH
) ] = {
297 [ C(RESULT_ACCESS
) ] = PM_IC_PREF_WRITE
,
298 [ C(RESULT_MISS
) ] = 0,
303 [ C(RESULT_ACCESS
) ] = PM_DATA_FROM_L3
,
304 [ C(RESULT_MISS
) ] = PM_DATA_FROM_L3MISS
,
307 [ C(RESULT_ACCESS
) ] = PM_L2_ST
,
308 [ C(RESULT_MISS
) ] = PM_L2_ST_MISS
,
310 [ C(OP_PREFETCH
) ] = {
311 [ C(RESULT_ACCESS
) ] = PM_L3_PREF_ALL
,
312 [ C(RESULT_MISS
) ] = 0,
317 [ C(RESULT_ACCESS
) ] = 0,
318 [ C(RESULT_MISS
) ] = PM_DTLB_MISS
,
321 [ C(RESULT_ACCESS
) ] = -1,
322 [ C(RESULT_MISS
) ] = -1,
324 [ C(OP_PREFETCH
) ] = {
325 [ C(RESULT_ACCESS
) ] = -1,
326 [ C(RESULT_MISS
) ] = -1,
331 [ C(RESULT_ACCESS
) ] = 0,
332 [ C(RESULT_MISS
) ] = PM_ITLB_MISS
,
335 [ C(RESULT_ACCESS
) ] = -1,
336 [ C(RESULT_MISS
) ] = -1,
338 [ C(OP_PREFETCH
) ] = {
339 [ C(RESULT_ACCESS
) ] = -1,
340 [ C(RESULT_MISS
) ] = -1,
345 [ C(RESULT_ACCESS
) ] = PM_BRU_CMPL
,
346 [ C(RESULT_MISS
) ] = PM_BR_MPRED_CMPL
,
349 [ C(RESULT_ACCESS
) ] = -1,
350 [ C(RESULT_MISS
) ] = -1,
352 [ C(OP_PREFETCH
) ] = {
353 [ C(RESULT_ACCESS
) ] = -1,
354 [ C(RESULT_MISS
) ] = -1,
359 [ C(RESULT_ACCESS
) ] = -1,
360 [ C(RESULT_MISS
) ] = -1,
363 [ C(RESULT_ACCESS
) ] = -1,
364 [ C(RESULT_MISS
) ] = -1,
366 [ C(OP_PREFETCH
) ] = {
367 [ C(RESULT_ACCESS
) ] = -1,
368 [ C(RESULT_MISS
) ] = -1,
375 static struct power_pmu power9_isa207_pmu
= {
377 .n_counter
= MAX_PMU_COUNTERS
,
378 .add_fields
= ISA207_ADD_FIELDS
,
379 .test_adder
= ISA207_TEST_ADDER
,
380 .compute_mmcr
= isa207_compute_mmcr
,
381 .config_bhrb
= power9_config_bhrb
,
382 .bhrb_filter_map
= power9_bhrb_filter_map
,
383 .get_constraint
= isa207_get_constraint
,
384 .disable_pmc
= isa207_disable_pmc
,
385 .flags
= PPMU_NO_SIAR
| PPMU_ARCH_207S
,
386 .n_generic
= ARRAY_SIZE(power9_generic_events
),
387 .generic_events
= power9_generic_events
,
388 .cache_events
= &power9_cache_events
,
389 .attr_groups
= power9_isa207_pmu_attr_groups
,
393 static struct power_pmu power9_pmu
= {
395 .n_counter
= MAX_PMU_COUNTERS
,
396 .add_fields
= ISA207_ADD_FIELDS
,
397 .test_adder
= P9_DD1_TEST_ADDER
,
398 .compute_mmcr
= isa207_compute_mmcr
,
399 .config_bhrb
= power9_config_bhrb
,
400 .bhrb_filter_map
= power9_bhrb_filter_map
,
401 .get_constraint
= isa207_get_constraint
,
402 .disable_pmc
= isa207_disable_pmc
,
403 .flags
= PPMU_HAS_SIER
| PPMU_ARCH_207S
,
404 .n_generic
= ARRAY_SIZE(power9_generic_events
),
405 .generic_events
= power9_generic_events
,
406 .cache_events
= &power9_cache_events
,
407 .attr_groups
= power9_pmu_attr_groups
,
411 static int __init
init_power9_pmu(void)
415 /* Comes from cpu_specs[] */
416 if (!cur_cpu_spec
->oprofile_cpu_type
||
417 strcmp(cur_cpu_spec
->oprofile_cpu_type
, "ppc64/power9"))
420 if (cpu_has_feature(CPU_FTR_POWER9_DD1
)) {
421 rc
= register_power_pmu(&power9_isa207_pmu
);
423 rc
= register_power_pmu(&power9_pmu
);
429 /* Tell userspace that EBB is supported */
430 cur_cpu_spec
->cpu_user_features2
|= PPC_FEATURE2_EBB
;
434 early_initcall(init_power9_pmu
);