1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // CCI Cache Coherent Interconnect PMU driver | |
3 | // Copyright (C) 2013-2018 Arm Ltd. | |
4 | // Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com> | |
5 | ||
6 | #include <linux/arm-cci.h> | |
7 | #include <linux/io.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/of_address.h> | |
11 | #include <linux/of_irq.h> | |
12 | #include <linux/of_platform.h> | |
13 | #include <linux/perf_event.h> | |
14 | #include <linux/platform_device.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/spinlock.h> | |
17 | ||
18 | extern void __iomem *const cci_ctrl_base; | |
19 | ||
20 | #define DRIVER_NAME "ARM-CCI PMU" | |
21 | ||
22 | #define CCI_PMCR 0x0100 | |
23 | #define CCI_PID2 0x0fe8 | |
24 | ||
25 | #define CCI_PMCR_CEN 0x00000001 | |
26 | #define CCI_PMCR_NCNT_MASK 0x0000f800 | |
27 | #define CCI_PMCR_NCNT_SHIFT 11 | |
28 | ||
29 | #define CCI_PID2_REV_MASK 0xf0 | |
30 | #define CCI_PID2_REV_SHIFT 4 | |
31 | ||
32 | #define CCI_PMU_EVT_SEL 0x000 | |
33 | #define CCI_PMU_CNTR 0x004 | |
34 | #define CCI_PMU_CNTR_CTRL 0x008 | |
35 | #define CCI_PMU_OVRFLW 0x00c | |
36 | ||
37 | #define CCI_PMU_OVRFLW_FLAG 1 | |
38 | ||
39 | #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) | |
40 | #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) | |
41 | #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1) |
42 | #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) | |
43 | ||
44 | #define CCI_PMU_MAX_HW_CNTRS(model) \ | |
45 | ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) | |
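/*
 * For illustration: each counter owns a window of cntr_size bytes at
 * CCI_PMU_CNTR_BASE(model, idx), holding EVT_SEL/CNTR/CNTR_CTRL/OVRFLW at
 * the offsets above. E.g. with cntr_size = SZ_4K, counter 2's CCI_PMU_CNTR
 * register sits at base + 2 * 0x1000 + 0x4 = base + 0x2004.
 */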
46 | ||
47 | /* Types of interfaces that can generate events */ | |
48 | enum { | |
49 | CCI_IF_SLAVE, | |
50 | CCI_IF_MASTER, | |
51 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
52 | CCI_IF_GLOBAL, | |
53 | #endif | |
54 | CCI_IF_MAX, | |
55 | }; | |
56 | ||
57 | struct event_range { | |
58 | u32 min; | |
59 | u32 max; | |
60 | }; | |
61 | ||
62 | struct cci_pmu_hw_events { | |
63 | struct perf_event **events; | |
64 | unsigned long *used_mask; | |
65 | raw_spinlock_t pmu_lock; | |
66 | }; | |
67 | ||
68 | struct cci_pmu; | |
69 | /* | |
70 | * struct cci_pmu_model: | |
71 | * @fixed_hw_cntrs - Number of fixed event counters | |
72 | * @num_hw_cntrs - Maximum number of programmable event counters | |
73 | * @cntr_size - Size of an event counter mapping | |
74 | */ | |
75 | struct cci_pmu_model { | |
76 | char *name; | |
77 | u32 fixed_hw_cntrs; | |
78 | u32 num_hw_cntrs; | |
79 | u32 cntr_size; | |
80 | struct attribute **format_attrs; | |
81 | struct attribute **event_attrs; | |
82 | struct event_range event_ranges[CCI_IF_MAX]; | |
83 | int (*validate_hw_event)(struct cci_pmu *, unsigned long); | |
84 | int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); | |
85 | void (*write_counters)(struct cci_pmu *, unsigned long *); | |
86 | }; | |
87 | ||
88 | static struct cci_pmu_model cci_pmu_models[]; | |
89 | ||
90 | struct cci_pmu { | |
91 | void __iomem *base; | |
92 | struct pmu pmu; | |
93 | int cpu; |
94 | int nr_irqs; |
95 | int *irqs; | |
96 | unsigned long active_irqs; | |
97 | const struct cci_pmu_model *model; | |
98 | struct cci_pmu_hw_events hw_events; | |
99 | struct platform_device *plat_device; | |
100 | int num_cntrs; | |
101 | atomic_t active_events; | |
102 | struct mutex reserve_mutex; | |
103 | }; |
104 | ||
105 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) | |
106 | ||
107 | static struct cci_pmu *g_cci_pmu; |
108 | ||
109 | enum cci_models { |
110 | #ifdef CONFIG_ARM_CCI400_PMU | |
111 | CCI400_R0, | |
112 | CCI400_R1, | |
113 | #endif | |
114 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
115 | CCI500_R0, | |
116 | CCI550_R0, | |
117 | #endif | |
118 | CCI_MODEL_MAX | |
119 | }; | |
120 | ||
121 | static void pmu_write_counters(struct cci_pmu *cci_pmu, | |
122 | unsigned long *mask); | |
123 | static ssize_t cci_pmu_format_show(struct device *dev, | |
124 | struct device_attribute *attr, char *buf); | |
125 | static ssize_t cci_pmu_event_show(struct device *dev, | |
126 | struct device_attribute *attr, char *buf); | |
127 | ||
128 | #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ | |
129 | &((struct dev_ext_attribute[]) { \ | |
130 | { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \ | |
131 | })[0].attr.attr | |
132 | ||
133 | #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ | |
134 | CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) | |
135 | #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | |
136 | CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) | |
137 | ||
138 | /* CCI400 PMU Specific definitions */ | |
139 | ||
140 | #ifdef CONFIG_ARM_CCI400_PMU | |
141 | ||
142 | /* Port ids */ | |
143 | #define CCI400_PORT_S0 0 | |
144 | #define CCI400_PORT_S1 1 | |
145 | #define CCI400_PORT_S2 2 | |
146 | #define CCI400_PORT_S3 3 | |
147 | #define CCI400_PORT_S4 4 | |
148 | #define CCI400_PORT_M0 5 | |
149 | #define CCI400_PORT_M1 6 | |
150 | #define CCI400_PORT_M2 7 | |
151 | ||
152 | #define CCI400_R1_PX 5 | |
153 | ||
154 | /* | |
155 | * Instead of an event id to monitor CCI cycles, a dedicated counter is | |
156 | * provided. Use 0xff to represent CCI cycles and hope that no future revisions | |
157 | * make use of this event in hardware. | |
158 | */ | |
159 | enum cci400_perf_events { | |
160 | CCI400_PMU_CYCLES = 0xff | |
161 | }; | |
162 | ||
163 | #define CCI400_PMU_CYCLE_CNTR_IDX 0 | |
164 | #define CCI400_PMU_CNTR0_IDX 1 | |
165 | ||
166 | /* | |
167 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 | |
168 | * ports and bits 4:0 are event codes. There are different event codes | |
169 | * associated with each port type. | |
170 | * | |
171 | * Additionally, the range of events associated with the port types changed | |
172 | * between Rev0 and Rev1. | |
173 | * | |
174 | * The constants below define the range of valid codes for each port type for | |
175 | * the different revisions and are used to validate the event to be monitored. | |
176 | */ | |
177 | ||
178 | #define CCI400_PMU_EVENT_MASK 0xffUL | |
179 | #define CCI400_PMU_EVENT_SOURCE_SHIFT 5 | |
180 | #define CCI400_PMU_EVENT_SOURCE_MASK 0x7 | |
181 | #define CCI400_PMU_EVENT_CODE_SHIFT 0 | |
182 | #define CCI400_PMU_EVENT_CODE_MASK 0x1f | |
183 | #define CCI400_PMU_EVENT_SOURCE(event) \ | |
184 | ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ | |
185 | CCI400_PMU_EVENT_SOURCE_MASK) | |
186 | #define CCI400_PMU_EVENT_CODE(event) \ | |
187 | ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) | |
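/*
 * Worked example of the encoding above: a config value of 0xd1 decodes as
 * source = (0xd1 >> 5) & 0x7 = 6 (CCI400_PORT_M1) and
 * code = 0xd1 & 0x1f = 0x11, i.e. master-interface event 0x11 on port M1.
 * Whether that code is valid depends on the revision ranges below:
 * 0x11 is in range for r1 masters but not for r0 masters.
 */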
188 | ||
189 | #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 | |
190 | #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 | |
191 | #define CCI400_R0_MASTER_PORT_MIN_EV 0x14 | |
192 | #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a | |
193 | ||
194 | #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 | |
195 | #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 | |
196 | #define CCI400_R1_MASTER_PORT_MIN_EV 0x00 | |
197 | #define CCI400_R1_MASTER_PORT_MAX_EV 0x11 | |
198 | ||
199 | #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | |
200 | CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ | |
201 | (unsigned long)_config) | |
202 | ||
203 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, | |
204 | struct device_attribute *attr, char *buf); | |
205 | ||
206 | static struct attribute *cci400_pmu_format_attrs[] = { | |
207 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | |
208 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), | |
209 | NULL | |
210 | }; | |
211 | ||
212 | static struct attribute *cci400_r0_pmu_event_attrs[] = { | |
213 | /* Slave events */ | |
214 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | |
215 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | |
216 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | |
217 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | |
218 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | |
219 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | |
220 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | |
221 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
222 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | |
223 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | |
224 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | |
225 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | |
226 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | |
227 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | |
228 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | |
229 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | |
230 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | |
231 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | |
232 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | |
233 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | |
234 | /* Master events */ | |
235 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), | |
236 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), | |
237 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), | |
238 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), | |
239 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), | |
240 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), | |
241 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), | |
242 | /* Special event for cycles counter */ | |
243 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | |
244 | NULL | |
245 | }; | |
246 | ||
247 | static struct attribute *cci400_r1_pmu_event_attrs[] = { | |
248 | /* Slave events */ | |
249 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | |
250 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | |
251 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | |
252 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | |
253 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | |
254 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | |
255 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | |
256 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
257 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | |
258 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | |
259 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | |
260 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | |
261 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | |
262 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | |
263 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | |
264 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | |
265 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | |
266 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | |
267 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | |
268 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | |
269 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), | |
270 | /* Master events */ | |
271 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), | |
272 | CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), | |
273 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), | |
274 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), | |
275 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), | |
276 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), | |
277 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), | |
278 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), | |
279 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), | |
280 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), | |
281 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), | |
282 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), | |
283 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), | |
284 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), | |
285 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), | |
286 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), | |
287 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), | |
288 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), | |
289 | /* Special event for cycles counter */ | |
290 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | |
291 | NULL | |
292 | }; | |
293 | ||
294 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, | |
295 | struct device_attribute *attr, char *buf) | |
296 | { | |
297 | struct dev_ext_attribute *eattr = container_of(attr, | |
298 | struct dev_ext_attribute, attr); | |
299 | return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); | |
300 | } | |
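/*
 * Usage sketch (assuming the PMU registers under the model name "CCI_400"):
 *
 *   perf stat -a -e CCI_400/config=0xff/ sleep 1
 *
 * counts CCI cycles on the dedicated cycle counter via the attribute above.
 */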
301 | ||
302 | static int cci400_get_event_idx(struct cci_pmu *cci_pmu, | |
303 | struct cci_pmu_hw_events *hw, | |
304 | unsigned long cci_event) | |
305 | { | |
306 | int idx; | |
307 | ||
308 | /* cycles event idx is fixed */ | |
309 | if (cci_event == CCI400_PMU_CYCLES) { | |
310 | if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | |
311 | return -EAGAIN; | |
312 | ||
313 | return CCI400_PMU_CYCLE_CNTR_IDX; | |
314 | } | |
315 | ||
316 | for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | |
317 | if (!test_and_set_bit(idx, hw->used_mask)) | |
318 | return idx; | |
319 | ||
320 | /* No counters available */ | |
321 | return -EAGAIN; | |
322 | } | |
323 | ||
324 | static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) | |
325 | { | |
326 | u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); | |
327 | u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); | |
328 | int if_type; | |
329 | ||
330 | if (hw_event & ~CCI400_PMU_EVENT_MASK) | |
331 | return -ENOENT; | |
332 | ||
333 | if (hw_event == CCI400_PMU_CYCLES) | |
334 | return hw_event; | |
335 | ||
336 | switch (ev_source) { | |
337 | case CCI400_PORT_S0: | |
338 | case CCI400_PORT_S1: | |
339 | case CCI400_PORT_S2: | |
340 | case CCI400_PORT_S3: | |
341 | case CCI400_PORT_S4: | |
342 | /* Slave Interface */ | |
343 | if_type = CCI_IF_SLAVE; | |
344 | break; | |
345 | case CCI400_PORT_M0: | |
346 | case CCI400_PORT_M1: | |
347 | case CCI400_PORT_M2: | |
348 | /* Master Interface */ | |
349 | if_type = CCI_IF_MASTER; | |
350 | break; | |
351 | default: | |
352 | return -ENOENT; | |
353 | } | |
354 | ||
355 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
356 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
357 | return hw_event; | |
358 | ||
359 | return -ENOENT; | |
360 | } | |
361 | ||
362 | static int probe_cci400_revision(void) | |
363 | { | |
364 | int rev; | |
365 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; | |
366 | rev >>= CCI_PID2_REV_SHIFT; | |
367 | ||
368 | if (rev < CCI400_R1_PX) | |
369 | return CCI400_R0; | |
370 | else | |
371 | return CCI400_R1; | |
372 | } | |
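/*
 * Example of the revision probe: PID2[7:4] holds the revision, so a PID2
 * read of 0x5b gives rev = (0x5b & 0xf0) >> 4 = 5, which is >= CCI400_R1_PX
 * and therefore selects CCI400_R1; anything below 5 selects CCI400_R0.
 */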
373 | ||
374 | static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) | |
375 | { | |
376 | if (platform_has_secure_cci_access()) | |
377 | return &cci_pmu_models[probe_cci400_revision()]; | |
378 | return NULL; | |
379 | } | |
380 | #else /* !CONFIG_ARM_CCI400_PMU */ | |
381 | static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) | |
382 | { | |
383 | return NULL; | |
384 | } | |
385 | #endif /* CONFIG_ARM_CCI400_PMU */ | |
386 | ||
387 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
388 | ||
389 | /* | |
390 | * CCI5xx PMU event id is a 9-bit value made of two parts. |
391 | * bits [8:5] - Source for the event | |
392 | * bits [4:0] - Event code (specific to type of interface) | |
393 | * | |
394 | * | |
395 | */ | |
396 | ||
397 | /* Port ids */ | |
398 | #define CCI5xx_PORT_S0 0x0 | |
399 | #define CCI5xx_PORT_S1 0x1 | |
400 | #define CCI5xx_PORT_S2 0x2 | |
401 | #define CCI5xx_PORT_S3 0x3 | |
402 | #define CCI5xx_PORT_S4 0x4 | |
403 | #define CCI5xx_PORT_S5 0x5 | |
404 | #define CCI5xx_PORT_S6 0x6 | |
405 | ||
406 | #define CCI5xx_PORT_M0 0x8 | |
407 | #define CCI5xx_PORT_M1 0x9 | |
408 | #define CCI5xx_PORT_M2 0xa | |
409 | #define CCI5xx_PORT_M3 0xb | |
410 | #define CCI5xx_PORT_M4 0xc | |
411 | #define CCI5xx_PORT_M5 0xd | |
412 | #define CCI5xx_PORT_M6 0xe | |
413 | ||
414 | #define CCI5xx_PORT_GLOBAL 0xf | |
415 | ||
416 | #define CCI5xx_PMU_EVENT_MASK 0x1ffUL | |
417 | #define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5 | |
418 | #define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf | |
419 | #define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0 | |
420 | #define CCI5xx_PMU_EVENT_CODE_MASK 0x1f | |
421 | ||
422 | #define CCI5xx_PMU_EVENT_SOURCE(event) \ | |
423 | ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) | |
424 | #define CCI5xx_PMU_EVENT_CODE(event) \ | |
425 | ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) | |
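/*
 * Worked example: config 0x1e4 decodes as source = (0x1e4 >> 5) & 0xf = 0xf
 * (CCI5xx_PORT_GLOBAL) and code = 0x1e4 & 0x1f = 0x4, i.e. the global
 * snoop-access-miss event for filter banks 0/1 listed below.
 */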
426 | ||
427 | #define CCI5xx_SLAVE_PORT_MIN_EV 0x00 | |
428 | #define CCI5xx_SLAVE_PORT_MAX_EV 0x1f | |
429 | #define CCI5xx_MASTER_PORT_MIN_EV 0x00 | |
430 | #define CCI5xx_MASTER_PORT_MAX_EV 0x06 | |
431 | #define CCI5xx_GLOBAL_PORT_MIN_EV 0x00 | |
432 | #define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f | |
433 | ||
434 | ||
435 | #define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | |
436 | CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \ | |
437 | (unsigned long) _config) | |
438 | ||
439 | static ssize_t cci5xx_pmu_global_event_show(struct device *dev, | |
440 | struct device_attribute *attr, char *buf); | |
441 | ||
442 | static struct attribute *cci5xx_pmu_format_attrs[] = { | |
443 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | |
444 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), | |
445 | NULL, | |
446 | }; | |
447 | ||
448 | static struct attribute *cci5xx_pmu_event_attrs[] = { | |
449 | /* Slave events */ | |
450 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), | |
451 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), | |
452 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), | |
453 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), | |
454 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), | |
455 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), | |
456 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), | |
457 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
458 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), | |
459 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), | |
460 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), | |
461 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), | |
462 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), | |
463 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), | |
464 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), | |
465 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), | |
466 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), | |
467 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), | |
468 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), | |
469 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), | |
470 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), | |
471 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), | |
472 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), | |
473 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), | |
474 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), | |
475 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), | |
476 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), | |
477 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), | |
478 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), | |
479 | CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), | |
480 | CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), | |
481 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), | |
482 | ||
483 | /* Master events */ | |
484 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), | |
485 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), | |
486 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), | |
487 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), | |
488 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), | |
489 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), | |
490 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), | |
491 | ||
492 | /* Global events */ | |
493 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), | |
494 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), | |
495 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), | |
496 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), | |
497 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), | |
498 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), | |
499 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), | |
500 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), | |
501 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), | |
502 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), | |
503 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), | |
504 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), | |
505 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), | |
506 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), | |
507 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), | |
508 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), | |
509 | NULL | |
510 | }; | |
511 | ||
512 | static ssize_t cci5xx_pmu_global_event_show(struct device *dev, | |
513 | struct device_attribute *attr, char *buf) | |
514 | { | |
515 | struct dev_ext_attribute *eattr = container_of(attr, | |
516 | struct dev_ext_attribute, attr); | |
517 | /* Global events have single fixed source code */ | |
518 | return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", | |
519 | (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); | |
520 | } | |
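/*
 * Usage sketch (assuming the PMU registers under the model name "CCI_500"):
 *
 *   perf stat -a -e CCI_500/source=0xf,event=0x4/ sleep 1
 *
 * which the format attributes above pack into config = 0x1e4.
 */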
521 | ||
522 | /* | |
523 | * CCI500 provides 8 independent event counters that can count | |
524 | * any of the events available. | |
525 | * CCI500 PMU event source ids | |
526 | * 0x0-0x6 - Slave interfaces | |
527 | * 0x8-0xD - Master interfaces | |
528 | * 0xf - Global Events | |
529 | * 0x7,0xe - Reserved | |
530 | */ | |
531 | static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, | |
532 | unsigned long hw_event) | |
533 | { | |
534 | u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); | |
535 | u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); | |
536 | int if_type; | |
537 | ||
538 | if (hw_event & ~CCI5xx_PMU_EVENT_MASK) | |
539 | return -ENOENT; | |
540 | ||
541 | switch (ev_source) { | |
542 | case CCI5xx_PORT_S0: | |
543 | case CCI5xx_PORT_S1: | |
544 | case CCI5xx_PORT_S2: | |
545 | case CCI5xx_PORT_S3: | |
546 | case CCI5xx_PORT_S4: | |
547 | case CCI5xx_PORT_S5: | |
548 | case CCI5xx_PORT_S6: | |
549 | if_type = CCI_IF_SLAVE; | |
550 | break; | |
551 | case CCI5xx_PORT_M0: | |
552 | case CCI5xx_PORT_M1: | |
553 | case CCI5xx_PORT_M2: | |
554 | case CCI5xx_PORT_M3: | |
555 | case CCI5xx_PORT_M4: | |
556 | case CCI5xx_PORT_M5: | |
557 | if_type = CCI_IF_MASTER; | |
558 | break; | |
559 | case CCI5xx_PORT_GLOBAL: | |
560 | if_type = CCI_IF_GLOBAL; | |
561 | break; | |
562 | default: | |
563 | return -ENOENT; | |
564 | } | |
565 | ||
566 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
567 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
568 | return hw_event; | |
569 | ||
570 | return -ENOENT; | |
571 | } | |
572 | ||
573 | /* | |
574 | * CCI550 provides 8 independent event counters that can count | |
575 | * any of the events available. | |
576 | * CCI550 PMU event source ids | |
577 | * 0x0-0x6 - Slave interfaces | |
578 | * 0x8-0xe - Master interfaces | |
579 | * 0xf - Global Events | |
580 | * 0x7 - Reserved | |
581 | */ | |
582 | static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, | |
583 | unsigned long hw_event) | |
584 | { | |
585 | u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); | |
586 | u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); | |
587 | int if_type; | |
588 | ||
589 | if (hw_event & ~CCI5xx_PMU_EVENT_MASK) | |
590 | return -ENOENT; | |
591 | ||
592 | switch (ev_source) { | |
593 | case CCI5xx_PORT_S0: | |
594 | case CCI5xx_PORT_S1: | |
595 | case CCI5xx_PORT_S2: | |
596 | case CCI5xx_PORT_S3: | |
597 | case CCI5xx_PORT_S4: | |
598 | case CCI5xx_PORT_S5: | |
599 | case CCI5xx_PORT_S6: | |
600 | if_type = CCI_IF_SLAVE; | |
601 | break; | |
602 | case CCI5xx_PORT_M0: | |
603 | case CCI5xx_PORT_M1: | |
604 | case CCI5xx_PORT_M2: | |
605 | case CCI5xx_PORT_M3: | |
606 | case CCI5xx_PORT_M4: | |
607 | case CCI5xx_PORT_M5: | |
608 | case CCI5xx_PORT_M6: | |
609 | if_type = CCI_IF_MASTER; | |
610 | break; | |
611 | case CCI5xx_PORT_GLOBAL: | |
612 | if_type = CCI_IF_GLOBAL; | |
613 | break; | |
614 | default: | |
615 | return -ENOENT; | |
616 | } | |
617 | ||
618 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
619 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
620 | return hw_event; | |
621 | ||
622 | return -ENOENT; | |
623 | } | |
624 | ||
625 | #endif /* CONFIG_ARM_CCI5xx_PMU */ | |
626 | ||
627 | /* | |
628 | * Program the CCI PMU counters which have PERF_HES_ARCH set | |
629 | * with the event period and mark them ready before we enable | |
630 | * PMU. | |
631 | */ | |
632 | static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) | |
633 | { | |
634 | int i; | |
635 | struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; | |
636 | ||
637 | DECLARE_BITMAP(mask, cci_pmu->num_cntrs); | |
638 | ||
639 | bitmap_zero(mask, cci_pmu->num_cntrs); | |
640 | for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { | |
641 | struct perf_event *event = cci_hw->events[i]; | |
642 | ||
643 | if (WARN_ON(!event)) | |
644 | continue; | |
645 | ||
646 | /* Leave the events which are not counting */ | |
647 | if (event->hw.state & PERF_HES_STOPPED) | |
648 | continue; | |
649 | if (event->hw.state & PERF_HES_ARCH) { | |
650 | set_bit(i, mask); | |
651 | event->hw.state &= ~PERF_HES_ARCH; | |
652 | } | |
653 | } | |
654 | ||
655 | pmu_write_counters(cci_pmu, mask); | |
656 | } | |
657 | ||
658 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
659 | static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) | |
660 | { | |
661 | u32 val; | |
662 | ||
663 | /* Enable all the PMU counters. */ | |
664 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; | |
665 | writel(val, cci_ctrl_base + CCI_PMCR); | |
666 | } | |
667 | ||
668 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
669 | static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu) | |
670 | { | |
671 | cci_pmu_sync_counters(cci_pmu); | |
672 | __cci_pmu_enable_nosync(cci_pmu); | |
673 | } | |
674 | ||
675 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
676 | static void __cci_pmu_disable(void) | |
677 | { | |
678 | u32 val; | |
679 | ||
680 | /* Disable all the PMU counters. */ | |
681 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; | |
682 | writel(val, cci_ctrl_base + CCI_PMCR); | |
683 | } | |
684 | ||
685 | static ssize_t cci_pmu_format_show(struct device *dev, | |
686 | struct device_attribute *attr, char *buf) | |
687 | { | |
688 | struct dev_ext_attribute *eattr = container_of(attr, | |
689 | struct dev_ext_attribute, attr); | |
690 | return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var); | |
691 | } | |
692 | ||
693 | static ssize_t cci_pmu_event_show(struct device *dev, | |
694 | struct device_attribute *attr, char *buf) | |
695 | { | |
696 | struct dev_ext_attribute *eattr = container_of(attr, | |
697 | struct dev_ext_attribute, attr); | |
698 | /* source parameter is mandatory for normal PMU events */ | |
699 | return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n", | |
700 | (unsigned long)eattr->var); | |
701 | } | |
702 | ||
703 | static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) | |
704 | { | |
705 | return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); | |
706 | } | |
707 | ||
708 | static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) | |
709 | { | |
710 | return readl_relaxed(cci_pmu->base + | |
711 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | |
712 | } | |
713 | ||
714 | static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, | |
715 | int idx, unsigned int offset) | |
716 | { | |
717 | writel_relaxed(value, cci_pmu->base + | |
718 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | |
719 | } | |
720 | ||
721 | static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) | |
722 | { | |
723 | pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); | |
724 | } | |
725 | ||
726 | static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) | |
727 | { | |
728 | pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); | |
729 | } | |
730 | ||
731 | static bool __maybe_unused | |
732 | pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx) | |
733 | { | |
734 | return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0; | |
735 | } | |
736 | ||
737 | static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) | |
738 | { | |
739 | pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); | |
740 | } | |
741 | ||
742 | /* | |
743 | * For all counters on the CCI-PMU, disable any 'enabled' counters, | |
744 | * saving the changed counters in the mask, so that we can restore | |
745 | * it later using pmu_restore_counters. The mask is private to the | |
746 | * caller. We cannot rely on the used_mask maintained by the CCI_PMU | |
747 | * as it only tells us if the counter is assigned to perf_event or not. | |
748 | * The state of the perf_event cannot be locked by the PMU layer, hence | |
749 | * we check the individual counter status (which can be locked by | |
750 | * cci_pmu->hw_events->pmu_lock). |
751 | * | |
752 | * @mask should be initialised to empty by the caller. | |
753 | */ | |
754 | static void __maybe_unused | |
755 | pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
756 | { | |
757 | int i; | |
758 | ||
759 | for (i = 0; i < cci_pmu->num_cntrs; i++) { | |
760 | if (pmu_counter_is_enabled(cci_pmu, i)) { | |
761 | set_bit(i, mask); | |
762 | pmu_disable_counter(cci_pmu, i); | |
763 | } | |
764 | } | |
765 | } | |
766 | ||
767 | /* | |
768 | * Restore the status of the counters. Reversal of the pmu_save_counters(). | |
769 | * For each counter set in the mask, enable the counter back. | |
770 | */ | |
771 | static void __maybe_unused | |
772 | pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
773 | { | |
774 | int i; | |
775 | ||
776 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) | |
777 | pmu_enable_counter(cci_pmu, i); | |
778 | } | |
779 | ||
780 | /* | |
781 | * Returns the number of programmable counters actually implemented | |
782 | * by the cci | |
783 | */ | |
784 | static u32 pmu_get_max_counters(void) | |
785 | { | |
786 | return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & | |
787 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; | |
788 | } | |
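/*
 * Example: a PMCR read of 0x2001 has CEN set and yields
 * (0x2001 & CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT = 0x2000 >> 11 = 4,
 * i.e. four programmable counters implemented.
 */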
789 | ||
790 | static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) | |
791 | { | |
792 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
793 | unsigned long cci_event = event->hw.config_base; | |
794 | int idx; | |
795 | ||
796 | if (cci_pmu->model->get_event_idx) | |
797 | return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); | |
798 | ||
799 | /* Generic code to find an unused idx from the mask */ | |
800 | for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) |
801 | if (!test_and_set_bit(idx, hw->used_mask)) | |
802 | return idx; | |
803 | ||
804 | /* No counters available */ | |
805 | return -EAGAIN; | |
806 | } | |
807 | ||
808 | static int pmu_map_event(struct perf_event *event) | |
809 | { | |
810 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
811 | ||
812 | if (event->attr.type < PERF_TYPE_MAX || | |
813 | !cci_pmu->model->validate_hw_event) | |
814 | return -ENOENT; | |
815 | ||
816 | return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); | |
817 | } | |
818 | ||
819 | static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) | |
820 | { | |
821 | int i; | |
822 | struct platform_device *pmu_device = cci_pmu->plat_device; | |
823 | ||
824 | if (unlikely(!pmu_device)) | |
825 | return -ENODEV; | |
826 | ||
827 | if (cci_pmu->nr_irqs < 1) { | |
828 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); | |
829 | return -ENODEV; | |
830 | } | |
831 | ||
832 | /* | |
833 | * Register all available CCI PMU interrupts. In the interrupt handler | |
834 | * we iterate over the counters checking for interrupt source (the | |
835 | * overflowing counter) and clear it. | |
836 | * | |
837 | * This should allow handling of non-unique interrupt for the counters. | |
838 | */ | |
839 | for (i = 0; i < cci_pmu->nr_irqs; i++) { | |
840 | int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, | |
841 | "arm-cci-pmu", cci_pmu); | |
842 | if (err) { | |
843 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", | |
844 | cci_pmu->irqs[i]); | |
845 | return err; | |
846 | } | |
847 | ||
848 | set_bit(i, &cci_pmu->active_irqs); | |
849 | } | |
850 | ||
851 | return 0; | |
852 | } | |
853 | ||
854 | static void pmu_free_irq(struct cci_pmu *cci_pmu) | |
855 | { | |
856 | int i; | |
857 | ||
858 | for (i = 0; i < cci_pmu->nr_irqs; i++) { | |
859 | if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) | |
860 | continue; | |
861 | ||
862 | free_irq(cci_pmu->irqs[i], cci_pmu); | |
863 | } | |
864 | } | |
865 | ||
866 | static u32 pmu_read_counter(struct perf_event *event) | |
867 | { | |
868 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
869 | struct hw_perf_event *hw_counter = &event->hw; | |
870 | int idx = hw_counter->idx; | |
871 | u32 value; | |
872 | ||
873 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
874 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
875 | return 0; | |
876 | } | |
877 | value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); | |
878 | ||
879 | return value; | |
880 | } | |
881 | ||
882 | static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx) | |
883 | { | |
884 | pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); | |
885 | } | |
886 | ||
887 | static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
888 | { | |
889 | int i; | |
890 | struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; | |
891 | ||
892 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) { | |
893 | struct perf_event *event = cci_hw->events[i]; | |
894 | ||
895 | if (WARN_ON(!event)) | |
896 | continue; | |
897 | pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); | |
898 | } | |
899 | } | |
900 | ||
901 | static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
902 | { | |
903 | if (cci_pmu->model->write_counters) | |
904 | cci_pmu->model->write_counters(cci_pmu, mask); | |
905 | else | |
906 | __pmu_write_counters(cci_pmu, mask); | |
907 | } | |
908 | ||
909 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
910 | ||
911 | /* | |
912 | * CCI-500/CCI-550 has advanced power saving policies, which could gate the | |
913 | * clocks to the PMU counters, which makes the writes to them ineffective. | |
914 | * The only way to write to those counters is when the global counters | |
915 | * are enabled and the particular counter is enabled. | |
916 | * | |
917 | * So we do the following : | |
918 | * | |
919 | * 1) Disable all the PMU counters, saving their current state | |
920 | * 2) Enable the global PMU profiling, now that all counters are | |
921 | * disabled. | |
922 | * | |
923 | * For each counter to be programmed, repeat steps 3-7: | |
924 | * | |
925 | * 3) Write an invalid event code to the event control register for the | |
926 | *    counter, so that the counters are not modified. |
927 | * 4) Enable the counter control for the counter. | |
928 | * 5) Set the counter value | |
929 | * 6) Disable the counter | |
930 | * 7) Restore the event in the target counter | |
931 | * | |
932 | * 8) Disable the global PMU. | |
933 | * 9) Restore the status of the rest of the counters. | |
934 | * | |
935 | * We choose an event which for CCI-5xx is guaranteed not to count. | |
936 | * We use the highest possible event code (0x1f) for the master interface 0. | |
937 | */ | |
938 | #define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \ | |
939 | (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT)) | |
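/*
 * This expands to (0x8 << 5) | 0x1f = 0x11f: source M0 with event code 0x1f,
 * which is above CCI5xx_MASTER_PORT_MAX_EV (0x06) and hence never counts.
 */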
940 | static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
941 | { | |
942 | int i; | |
943 | DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs); | |
944 | ||
945 | bitmap_zero(saved_mask, cci_pmu->num_cntrs); | |
946 | pmu_save_counters(cci_pmu, saved_mask); | |
947 | ||
948 | /* | |
949 | * Now that all the counters are disabled, we can safely turn the PMU on, | |
950 | * without syncing the status of the counters | |
951 | */ | |
952 | __cci_pmu_enable_nosync(cci_pmu); | |
953 | ||
954 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) { | |
955 | struct perf_event *event = cci_pmu->hw_events.events[i]; | |
956 | ||
957 | if (WARN_ON(!event)) | |
958 | continue; | |
959 | ||
960 | pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT); | |
961 | pmu_enable_counter(cci_pmu, i); | |
962 | pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); | |
963 | pmu_disable_counter(cci_pmu, i); | |
964 | pmu_set_event(cci_pmu, i, event->hw.config_base); | |
965 | } | |
966 | ||
967 | __cci_pmu_disable(); | |
968 | ||
969 | pmu_restore_counters(cci_pmu, saved_mask); | |
970 | } | |
971 | ||
972 | #endif /* CONFIG_ARM_CCI5xx_PMU */ | |
973 | ||
974 | static u64 pmu_event_update(struct perf_event *event) | |
975 | { | |
976 | struct hw_perf_event *hwc = &event->hw; | |
977 | u64 delta, prev_raw_count, new_raw_count; | |
978 | ||
979 | do { | |
980 | prev_raw_count = local64_read(&hwc->prev_count); | |
981 | new_raw_count = pmu_read_counter(event); | |
982 | } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | |
983 | new_raw_count) != prev_raw_count); | |
984 | ||
985 | delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK; | |
986 | ||
987 | local64_add(delta, &event->count); | |
988 | ||
989 | return new_raw_count; | |
990 | } | |
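/*
 * The 32-bit mask makes the delta wrap-safe: e.g. prev = 0xfffffff0 and
 * new = 0x00000010 give (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK = 0x20,
 * the 0x20 events counted across the counter overflow.
 */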
991 | ||
992 | static void pmu_read(struct perf_event *event) | |
993 | { | |
994 | pmu_event_update(event); | |
995 | } | |
996 | ||
997 | static void pmu_event_set_period(struct perf_event *event) | |
998 | { | |
999 | struct hw_perf_event *hwc = &event->hw; | |
1000 | /* | |
1001 | * The CCI PMU counters have a period of 2^32. To account for the | |
1002 | * possibility of extreme interrupt latency we program for a period of |
1003 | * half that. Hopefully we can handle the interrupt before another 2^31 | |
1004 | * events occur and the counter overtakes its previous value. | |
1005 | */ | |
1006 | u64 val = 1ULL << 31; | |
1007 | local64_set(&hwc->prev_count, val); | |
1008 | ||
1009 | /* | |
1010 | * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose | |
1011 | * values need to be synced with the s/w state before the PMU is |
1012 | * enabled. | |
1013 | * Mark this counter for sync. | |
1014 | */ | |
1015 | hwc->state |= PERF_HES_ARCH; | |
1016 | } | |
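/*
 * Numerically: the counter is preloaded with 2^31 = 0x80000000, so it
 * overflows (and raises the IRQ) after 2^31 further events, i.e. half of
 * the 2^32 counter range, as described above.
 */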
1017 | ||
1018 | static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | |
1019 | { | |
1020 | unsigned long flags; | |
1021 | struct cci_pmu *cci_pmu = dev; | |
1022 | struct cci_pmu_hw_events *events = &cci_pmu->hw_events; | |
1023 | int idx, handled = IRQ_NONE; | |
1024 | ||
1025 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
1026 | ||
1027 | /* Disable the PMU while we walk through the counters */ | |
1028 | __cci_pmu_disable(); | |
1029 | /* | |
1030 | * Iterate over counters and update the corresponding perf events. | |
1031 | * This should work regardless of whether we have per-counter overflow | |
1032 | * interrupt or a combined overflow interrupt. | |
1033 | */ | |
1034 | for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { | |
1035 | struct perf_event *event = events->events[idx]; | |
1036 | ||
1037 | if (!event) | |
1038 | continue; | |
1039 | ||
1040 | /* Did this counter overflow? */ | |
1041 | if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & | |
1042 | CCI_PMU_OVRFLW_FLAG)) | |
1043 | continue; | |
1044 | ||
1045 | pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, | |
1046 | CCI_PMU_OVRFLW); | |
1047 | ||
1048 | pmu_event_update(event); | |
1049 | pmu_event_set_period(event); | |
1050 | handled = IRQ_HANDLED; | |
1051 | } | |
1052 | ||
1053 | /* Enable the PMU and sync possibly overflowed counters */ | |
1054 | __cci_pmu_enable_sync(cci_pmu); | |
1055 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
1056 | ||
1057 | return IRQ_RETVAL(handled); | |
1058 | } | |
1059 | ||
1060 | static int cci_pmu_get_hw(struct cci_pmu *cci_pmu) | |
1061 | { | |
1062 | int ret = pmu_request_irq(cci_pmu, pmu_handle_irq); | |
1063 | if (ret) { | |
1064 | pmu_free_irq(cci_pmu); | |
1065 | return ret; | |
1066 | } | |
1067 | return 0; | |
1068 | } | |
1069 | ||
1070 | static void cci_pmu_put_hw(struct cci_pmu *cci_pmu) | |
1071 | { | |
1072 | pmu_free_irq(cci_pmu); | |
1073 | } | |
1074 | ||
1075 | static void hw_perf_event_destroy(struct perf_event *event) | |
1076 | { | |
1077 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1078 | atomic_t *active_events = &cci_pmu->active_events; | |
1079 | struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; | |
1080 | ||
1081 | if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) { | |
1082 | cci_pmu_put_hw(cci_pmu); | |
1083 | mutex_unlock(reserve_mutex); | |
1084 | } | |
1085 | } | |
1086 | ||
1087 | static void cci_pmu_enable(struct pmu *pmu) | |
1088 | { | |
1089 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1090 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1091 | int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); | |
1092 | unsigned long flags; | |
1093 | ||
1094 | if (!enabled) | |
1095 | return; | |
1096 | ||
1097 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1098 | __cci_pmu_enable_sync(cci_pmu); | |
1099 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1100 | ||
1101 | } | |
1102 | ||
1103 | static void cci_pmu_disable(struct pmu *pmu) | |
1104 | { | |
1105 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1106 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1107 | unsigned long flags; | |
1108 | ||
1109 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1110 | __cci_pmu_disable(); | |
1111 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1112 | } | |
1113 | ||
1114 | /* | |
1115 | * Check if the idx represents a non-programmable counter. | |
1116 | * All the fixed event counters are mapped before the programmable | |
1117 | * counters. | |
1118 | */ | |
1119 | static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) | |
1120 | { | |
1121 | return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); | |
1122 | } | |
1123 | ||
1124 | static void cci_pmu_start(struct perf_event *event, int pmu_flags) | |
1125 | { | |
1126 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1127 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1128 | struct hw_perf_event *hwc = &event->hw; | |
1129 | int idx = hwc->idx; | |
1130 | unsigned long flags; | |
1131 | ||
1132 | /* | |
1133 | * To handle interrupt latency, we always reprogram the period | |
1134 | * regardless of PERF_EF_RELOAD. |
1135 | */ | |
1136 | if (pmu_flags & PERF_EF_RELOAD) | |
1137 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | |
1138 | ||
1139 | hwc->state = 0; | |
1140 | ||
1141 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1142 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1143 | return; | |
1144 | } | |
1145 | ||
1146 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1147 | ||
1148 | /* Configure the counter unless you are counting a fixed event */ | |
1149 | if (!pmu_fixed_hw_idx(cci_pmu, idx)) | |
1150 | pmu_set_event(cci_pmu, idx, hwc->config_base); | |
1151 | ||
1152 | pmu_event_set_period(event); | |
1153 | pmu_enable_counter(cci_pmu, idx); | |
1154 | ||
1155 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1156 | } | |
1157 | ||
1158 | static void cci_pmu_stop(struct perf_event *event, int pmu_flags) | |
1159 | { | |
1160 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1161 | struct hw_perf_event *hwc = &event->hw; | |
1162 | int idx = hwc->idx; | |
1163 | ||
1164 | if (hwc->state & PERF_HES_STOPPED) | |
1165 | return; | |
1166 | ||
1167 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1168 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1169 | return; | |
1170 | } | |
1171 | ||
1172 | /* | |
1173 | * We always reprogram the counter, so ignore PERF_EF_UPDATE. See | |
1174 | * cci_pmu_start() | |
1175 | */ | |
1176 | pmu_disable_counter(cci_pmu, idx); | |
1177 | pmu_event_update(event); | |
1178 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
1179 | } | |
1180 | ||
1181 | static int cci_pmu_add(struct perf_event *event, int flags) | |
1182 | { | |
1183 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1184 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1185 | struct hw_perf_event *hwc = &event->hw; | |
1186 | int idx; | |
1187 | int err = 0; | |
1188 | ||
1189 | perf_pmu_disable(event->pmu); | |
1190 | ||
1191 | /* If we don't have a space for the counter then finish early. */ | |
1192 | idx = pmu_get_event_idx(hw_events, event); | |
1193 | if (idx < 0) { | |
1194 | err = idx; | |
1195 | goto out; | |
1196 | } | |
1197 | ||
1198 | event->hw.idx = idx; | |
1199 | hw_events->events[idx] = event; | |
1200 | ||
1201 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
1202 | if (flags & PERF_EF_START) | |
1203 | cci_pmu_start(event, PERF_EF_RELOAD); | |
1204 | ||
1205 | /* Propagate our changes to the userspace mapping. */ | |
1206 | perf_event_update_userpage(event); | |
1207 | ||
1208 | out: | |
1209 | perf_pmu_enable(event->pmu); | |
1210 | return err; | |
1211 | } | |
1212 | ||
1213 | static void cci_pmu_del(struct perf_event *event, int flags) | |
1214 | { | |
1215 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1216 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1217 | struct hw_perf_event *hwc = &event->hw; | |
1218 | int idx = hwc->idx; | |
1219 | ||
1220 | cci_pmu_stop(event, PERF_EF_UPDATE); | |
1221 | hw_events->events[idx] = NULL; | |
1222 | clear_bit(idx, hw_events->used_mask); | |
1223 | ||
1224 | perf_event_update_userpage(event); | |
1225 | } | |
1226 | ||
1227 | static int validate_event(struct pmu *cci_pmu, | |
1228 | struct cci_pmu_hw_events *hw_events, | |
1229 | struct perf_event *event) | |
1230 | { | |
1231 | if (is_software_event(event)) | |
1232 | return 1; | |
1233 | ||
1234 | /* | |
1235 | * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The | |
1236 | * core perf code won't check that the pmu->ctx == leader->ctx | |
1237 | * until after pmu->event_init(event). | |
1238 | */ | |
1239 | if (event->pmu != cci_pmu) | |
1240 | return 0; | |
1241 | ||
1242 | if (event->state < PERF_EVENT_STATE_OFF) | |
1243 | return 1; | |
1244 | ||
1245 | if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) | |
1246 | return 1; | |
1247 | ||
1248 | return pmu_get_event_idx(hw_events, event) >= 0; | |
1249 | } | |
1250 | ||
1251 | static int validate_group(struct perf_event *event) | |
1252 | { | |
1253 | struct perf_event *sibling, *leader = event->group_leader; | |
1254 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1255 | unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; | |
1256 | struct cci_pmu_hw_events fake_pmu = { | |
1257 | /* | |
1258 | * Initialise the fake PMU. We only need to populate the | |
1259 | * used_mask for the purposes of validation. | |
1260 | */ | |
1261 | .used_mask = mask, | |
1262 | }; | |
1263 | memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); | |
1264 | ||
1265 | if (!validate_event(event->pmu, &fake_pmu, leader)) | |
1266 | return -EINVAL; | |
1267 | ||
1268 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | |
1269 | if (!validate_event(event->pmu, &fake_pmu, sibling)) | |
1270 | return -EINVAL; | |
1271 | } | |
1272 | ||
1273 | if (!validate_event(event->pmu, &fake_pmu, event)) | |
1274 | return -EINVAL; | |
1275 | ||
1276 | return 0; | |
1277 | } | |
1278 | ||
1279 | static int __hw_perf_event_init(struct perf_event *event) | |
1280 | { | |
1281 | struct hw_perf_event *hwc = &event->hw; | |
1282 | int mapping; | |
1283 | ||
1284 | mapping = pmu_map_event(event); | |
1285 | ||
1286 | if (mapping < 0) { | |
1287 | pr_debug("event %x:%llx not supported\n", event->attr.type, | |
1288 | event->attr.config); | |
1289 | return mapping; | |
1290 | } | |
1291 | ||
1292 | /* | |
1293 | * We don't assign an index until we actually place the event onto | |
1294 | * hardware. Use -1 to signify that we haven't decided where to put it | |
1295 | * yet. | |
1296 | */ | |
1297 | hwc->idx = -1; | |
1298 | hwc->config_base = 0; | |
1299 | hwc->config = 0; | |
1300 | hwc->event_base = 0; | |
1301 | ||
1302 | /* | |
1303 | * Store the event encoding into the config_base field. | |
1304 | */ | |
1305 | hwc->config_base |= (unsigned long)mapping; | |
1306 | ||
1307 | /* | |
1308 | * Limit the sample_period to half of the counter width. That way, the | |
1309 | * new counter value is far less likely to overtake the previous one | |
1310 | * unless you have some serious IRQ latency issues. | |
1311 | */ | |
1312 | hwc->sample_period = CCI_PMU_CNTR_MASK >> 1; | |
1313 | hwc->last_period = hwc->sample_period; | |
1314 | local64_set(&hwc->period_left, hwc->sample_period); | |
1315 | ||
1316 | if (event->group_leader != event) { | |
1317 | if (validate_group(event) != 0) | |
1318 | return -EINVAL; | |
1319 | } | |
1320 | ||
1321 | return 0; | |
1322 | } | |
1323 | ||
1324 | static int cci_pmu_event_init(struct perf_event *event) | |
1325 | { | |
1326 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1327 | atomic_t *active_events = &cci_pmu->active_events; | |
1328 | int err = 0; | |
1329 | |
1330 | if (event->attr.type != event->pmu->type) | |
1331 | return -ENOENT; | |
1332 | ||
1333 | /* Shared by all CPUs, no meaningful state to sample */ | |
1334 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) | |
1335 | return -EOPNOTSUPP; | |
1336 | ||
1337 | /* We have no filtering of any kind */ | |
1338 | if (event->attr.exclude_user || | |
1339 | event->attr.exclude_kernel || | |
1340 | event->attr.exclude_hv || | |
1341 | event->attr.exclude_idle || | |
1342 | event->attr.exclude_host || | |
1343 | event->attr.exclude_guest) | |
1344 | return -EINVAL; | |
1345 | ||
1346 | /* | |
1347 | * Following the example set by other "uncore" PMUs, we accept any CPU | |
1348 | * and rewrite its affinity dynamically rather than having perf core | |
1349 | * handle cpu == -1 and pid == -1 for this case. | |
1350 | * | |
1351 | * The perf core will pin online CPUs for the duration of this call and | |
1352 | * the event being installed into its context, so the PMU's CPU can't | |
1353 | * change under our feet. | |
1354 | */ | |
1355 | if (event->cpu < 0) |
1356 | return -EINVAL; |
1357 | event->cpu = cci_pmu->cpu; |
1358 | |
1359 | event->destroy = hw_perf_event_destroy; | |
1360 | if (!atomic_inc_not_zero(active_events)) { | |
1361 | mutex_lock(&cci_pmu->reserve_mutex); | |
1362 | if (atomic_read(active_events) == 0) | |
1363 | err = cci_pmu_get_hw(cci_pmu); | |
1364 | if (!err) | |
1365 | atomic_inc(active_events); | |
1366 | mutex_unlock(&cci_pmu->reserve_mutex); | |
1367 | } | |
1368 | if (err) | |
1369 | return err; | |
1370 | ||
1371 | err = __hw_perf_event_init(event); | |
1372 | if (err) | |
1373 | hw_perf_event_destroy(event); | |
1374 | ||
1375 | return err; | |
1376 | } | |
1377 | ||
1378 | static ssize_t pmu_cpumask_attr_show(struct device *dev, | |
1379 | struct device_attribute *attr, char *buf) | |
1380 | { | |
1381 | struct pmu *pmu = dev_get_drvdata(dev); | |
1382 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1383 | ||
1384 | return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); |
1385 | } |
1386 | ||
1387 | static struct device_attribute pmu_cpumask_attr = | |
1388 | __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL); | |
1389 | ||
1390 | static struct attribute *pmu_attrs[] = { | |
1391 | &pmu_cpumask_attr.attr, | |
1392 | NULL, | |
1393 | }; | |
1394 | ||
1395 | static struct attribute_group pmu_attr_group = { | |
1396 | .attrs = pmu_attrs, | |
1397 | }; | |
1398 | ||
1399 | static struct attribute_group pmu_format_attr_group = { | |
1400 | .name = "format", | |
1401 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1402 | }; | |
1403 | ||
1404 | static struct attribute_group pmu_event_attr_group = { | |
1405 | .name = "events", | |
1406 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1407 | }; | |
1408 | ||
1409 | static const struct attribute_group *pmu_attr_groups[] = { | |
1410 | &pmu_attr_group, | |
1411 | &pmu_format_attr_group, | |
1412 | &pmu_event_attr_group, | |
1413 | NULL | |
1414 | }; | |
1415 | ||
1416 | static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |
1417 | { | |
1418 | const struct cci_pmu_model *model = cci_pmu->model; | |
1419 | char *name = model->name; | |
1420 | u32 num_cntrs; | |
1421 | ||
1422 | pmu_event_attr_group.attrs = model->event_attrs; | |
1423 | pmu_format_attr_group.attrs = model->format_attrs; | |
1424 | ||
1425 | cci_pmu->pmu = (struct pmu) { | |
1426 | .name = cci_pmu->model->name, | |
1427 | .task_ctx_nr = perf_invalid_context, | |
1428 | .pmu_enable = cci_pmu_enable, | |
1429 | .pmu_disable = cci_pmu_disable, | |
1430 | .event_init = cci_pmu_event_init, | |
1431 | .add = cci_pmu_add, | |
1432 | .del = cci_pmu_del, | |
1433 | .start = cci_pmu_start, | |
1434 | .stop = cci_pmu_stop, | |
1435 | .read = pmu_read, | |
1436 | .attr_groups = pmu_attr_groups, | |
1437 | }; | |
1438 | ||
1439 | cci_pmu->plat_device = pdev; | |
1440 | num_cntrs = pmu_get_max_counters(); | |
1441 | if (num_cntrs > cci_pmu->model->num_hw_cntrs) { | |
1442 | dev_warn(&pdev->dev, |
1443 | "PMU implements more counters (%d) than supported by" |
1444 | " the model (%d), truncated.", |
1445 | num_cntrs, cci_pmu->model->num_hw_cntrs); | |
1446 | num_cntrs = cci_pmu->model->num_hw_cntrs; | |
1447 | } | |
1448 | cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; | |
1449 | ||
1450 | return perf_pmu_register(&cci_pmu->pmu, name, -1); | |
1451 | } | |
1452 | ||
1453 | static int cci_pmu_offline_cpu(unsigned int cpu) |
1454 | { |
1455 | int target; |
1456 | |
1457 | if (!g_cci_pmu || cpu != g_cci_pmu->cpu) |
1458 | return 0; |
1459 | |
1460 | target = cpumask_any_but(cpu_online_mask, cpu); |
1461 | if (target >= nr_cpu_ids) | |
1462 | return 0; | |
1463 | |
1464 | perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); | |
1465 | g_cci_pmu->cpu = target; | |
1466 | return 0; |
1467 | } | |
1468 | ||
static struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
	[CCI400_R0] = {
		.name = "CCI_400",
		.fixed_hw_cntrs = 1,	/* Cycle counter */
		.num_hw_cntrs = 4,
		.cntr_size = SZ_4K,
		.format_attrs = cci400_pmu_format_attrs,
		.event_attrs = cci400_r0_pmu_event_attrs,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI400_R0_SLAVE_PORT_MIN_EV,
				CCI400_R0_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI400_R0_MASTER_PORT_MIN_EV,
				CCI400_R0_MASTER_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci400_validate_hw_event,
		.get_event_idx = cci400_get_event_idx,
	},
	[CCI400_R1] = {
		.name = "CCI_400_r1",
		.fixed_hw_cntrs = 1,	/* Cycle counter */
		.num_hw_cntrs = 4,
		.cntr_size = SZ_4K,
		.format_attrs = cci400_pmu_format_attrs,
		.event_attrs = cci400_r1_pmu_event_attrs,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI400_R1_SLAVE_PORT_MIN_EV,
				CCI400_R1_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI400_R1_MASTER_PORT_MIN_EV,
				CCI400_R1_MASTER_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci400_validate_hw_event,
		.get_event_idx = cci400_get_event_idx,
	},
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	[CCI500_R0] = {
		.name = "CCI_500",
		.fixed_hw_cntrs = 0,
		.num_hw_cntrs = 8,
		.cntr_size = SZ_64K,
		.format_attrs = cci5xx_pmu_format_attrs,
		.event_attrs = cci5xx_pmu_event_attrs,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI5xx_SLAVE_PORT_MIN_EV,
				CCI5xx_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI5xx_MASTER_PORT_MIN_EV,
				CCI5xx_MASTER_PORT_MAX_EV,
			},
			[CCI_IF_GLOBAL] = {
				CCI5xx_GLOBAL_PORT_MIN_EV,
				CCI5xx_GLOBAL_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci500_validate_hw_event,
		.write_counters = cci5xx_pmu_write_counters,
	},
	[CCI550_R0] = {
		.name = "CCI_550",
		.fixed_hw_cntrs = 0,
		.num_hw_cntrs = 8,
		.cntr_size = SZ_64K,
		.format_attrs = cci5xx_pmu_format_attrs,
		.event_attrs = cci5xx_pmu_event_attrs,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI5xx_SLAVE_PORT_MIN_EV,
				CCI5xx_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI5xx_MASTER_PORT_MIN_EV,
				CCI5xx_MASTER_PORT_MAX_EV,
			},
			[CCI_IF_GLOBAL] = {
				CCI5xx_GLOBAL_PORT_MIN_EV,
				CCI5xx_GLOBAL_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci550_validate_hw_event,
		.write_counters = cci5xx_pmu_write_counters,
	},
#endif
};

static const struct of_device_id arm_cci_pmu_matches[] = {
#ifdef CONFIG_ARM_CCI400_PMU
	{
		.compatible = "arm,cci-400-pmu",
		.data = NULL,
	},
	{
		.compatible = "arm,cci-400-pmu,r0",
		.data = &cci_pmu_models[CCI400_R0],
	},
	{
		.compatible = "arm,cci-400-pmu,r1",
		.data = &cci_pmu_models[CCI400_R1],
	},
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	{
		.compatible = "arm,cci-500-pmu,r0",
		.data = &cci_pmu_models[CCI500_R0],
	},
	{
		.compatible = "arm,cci-550-pmu,r0",
		.data = &cci_pmu_models[CCI550_R0],
	},
#endif
	{},
};

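/*
 * Illustrative device-tree PMU node matched by the table above (a
 * sketch only; the unit address, reg window and interrupt specifiers
 * are hypothetical):
 *
 *	pmu@9000 {
 *		compatible = "arm,cci-400-pmu,r0";
 *		reg = <0x9000 0x5000>;
 *		interrupts = <0 101 4>, <0 102 4>, <0 103 4>,
 *			     <0 104 4>, <0 105 4>;
 *	};
 *
 * The reg window is picked up in cci_pmu_probe() via
 * platform_get_resource(), and one interrupt is expected per counter:
 * five for CCI-400 r0 (one fixed cycle counter plus four event
 * counters).
 */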
static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
{
	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
							 pdev->dev.of_node);
	if (!match)
		return NULL;
	if (match->data)
		return match->data;

	dev_warn(&pdev->dev,
		 "DEPRECATED compatible property, requires secure access to CCI registers\n");
	return probe_cci_model(pdev);
}

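/* Has "irq" already been recorded in the first "nr_irqs" entries of irqs[]? */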
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}

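/*
 * Allocate the driver instance along with the per-counter event and
 * IRQ bookkeeping arrays, sized for the selected model's full
 * complement of counters (fixed + programmable).
 */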
static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
{
	struct cci_pmu *cci_pmu;
	const struct cci_pmu_model *model;

	/*
	 * All allocations are devm_*, hence we don't have to free
	 * them explicitly on an error: they are released automatically
	 * on driver detach.
	 */
	model = get_cci_model(pdev);
	if (!model) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return ERR_PTR(-ENODEV);
	}

	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
	if (!cci_pmu)
		return ERR_PTR(-ENOMEM);

	cci_pmu->model = model;
	cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
				     sizeof(*cci_pmu->irqs), GFP_KERNEL);
	if (!cci_pmu->irqs)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
						 CCI_PMU_MAX_HW_CNTRS(model),
						 sizeof(*cci_pmu->hw_events.events),
						 GFP_KERNEL);
	if (!cci_pmu->hw_events.events)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
						    BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
						    sizeof(*cci_pmu->hw_events.used_mask),
						    GFP_KERNEL);
	if (!cci_pmu->hw_events.used_mask)
		return ERR_PTR(-ENOMEM);

	return cci_pmu;
}

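/*
 * Map the PMU register window, collect one overflow interrupt per
 * counter (ignoring duplicates for lines shared between counters),
 * then register the PMU with perf and the CPU hotplug machinery.
 */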
static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct cci_pmu *cci_pmu;
	int i, ret, irq;

	cci_pmu = cci_pmu_alloc(pdev);
	if (IS_ERR(cci_pmu))
		return PTR_ERR(cci_pmu);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cci_pmu->base))
		return PTR_ERR(cci_pmu->base);

	/*
	 * CCI PMU has one overflow interrupt per counter, but some may be
	 * tied together to a common interrupt.
	 */
	cci_pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
			continue;

		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
		return -EINVAL;
	}

	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
	mutex_init(&cci_pmu->reserve_mutex);
	atomic_set(&cci_pmu->active_events, 0);
	cci_pmu->cpu = get_cpu();

	ret = cci_pmu_init(cci_pmu, pdev);
	if (ret) {
		put_cpu();
		return ret;
	}

	cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
				  "perf/arm/cci:online", NULL,
				  cci_pmu_offline_cpu);
	put_cpu();
	g_cci_pmu = cci_pmu;
	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
	return 0;
}

1718 | ||
1719 | static struct platform_driver cci_pmu_driver = { | |
1720 | .driver = { | |
1721 | .name = DRIVER_NAME, | |
1722 | .of_match_table = arm_cci_pmu_matches, | |
1723 | }, | |
1724 | .probe = cci_pmu_probe, | |
1725 | }; | |
1726 | ||
03057f26 | 1727 | builtin_platform_driver(cci_pmu_driver); |
3de6be7a RM |
1728 | MODULE_LICENSE("GPL"); |
1729 | MODULE_DESCRIPTION("ARM CCI PMU support"); |