#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
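
/*
 * Example (illustrative only): PERF_TYPE_HW_CACHE packs the cache event
 * into the low three bytes of attr.config, one byte per field, so an
 * L1-dcache read miss is built by user space as:
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * armpmu_map_cache_event() above then looks that up in the PMU's
 * cache_map table to get the hardware event number.
 */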

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
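
/*
 * For illustration (not part of this file): a CPU PMU backend wires
 * these helpers up with its own tables. The names here are
 * hypothetical stand-ins for whatever the backend defines:
 *
 *	static int my_cpu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &my_perf_map,
 *					&my_perf_cache_map, 0xFF);
 *	}
 */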

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
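
/*
 * Worked example (illustrative): the counter is programmed with the
 * two's complement of the events remaining, so it overflows after
 * exactly 'left' events. With a 32-bit counter and left == 1000 we
 * write 0x100000000 - 1000 = 0xfffffc18, and the overflow interrupt
 * fires once 1000 more events have been counted.
 */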

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
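
/*
 * Note on the arithmetic above (illustrative): masking the difference
 * with max_period keeps the delta correct across a counter wrap. For a
 * 32-bit counter (max_period == 0xffffffff), prev == 0xfffffff0 and a
 * wrapped new value of 0x10 give
 *
 *	delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20,
 *
 * i.e. the 32 events that actually occurred. The cmpxchg loop retries
 * if an interrupt updated prev_count between the read and the swap.
 */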

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
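
/*
 * Note (added commentary): the perf core uses these hooks in pairs:
 * add()/del() claim and release a hardware counter for an event, while
 * start()/stop() actually run and halt it. armpmu_add() below claims a
 * counter index and, when called with PERF_EF_START, kicks the event
 * off immediately via armpmu_start().
 */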

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
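
/*
 * For example (illustrative): asking for a group with more hardware
 * events than the PMU has counters, e.g.
 *
 *	perf stat -e '{cycles,instructions,cache-misses,...}' ...
 *
 * fails here with -EINVAL once the fake used_mask runs out of free
 * counter slots.
 */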

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);

	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
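
/*
 * For example (illustrative): a request such as
 *
 *	attr.exclude_kernel = 1;
 *
 * asks to count user space only. On PMUs without a set_event_filter
 * implementation, __hw_perf_event_init() below rejects any such
 * exclusion request with -EOPNOTSUPP.
 */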

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
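
/*
 * Worked example (illustrative): with a 32-bit cycle counter,
 * max_period is 0xffffffff, so a non-sampling event gets a default
 * sample_period of 0x7fffffff. The overflow interrupt then reloads the
 * counter halfway through its range, long before the delta arithmetic
 * in armpmu_event_update() could be ambiguous about a wrap.
 */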

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();

	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
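
/*
 * Usage sketch (illustrative, not part of this file): a backend driver
 * fills in an arm_pmu at probe time and hands it to armpmu_register().
 * The names below are hypothetical stand-ins for a real backend:
 *
 *	static int my_pmu_probe(struct platform_device *pdev)
 *	{
 *		struct arm_pmu *pmu = my_pmu_alloc_and_init(pdev);
 *
 *		if (!pmu)
 *			return -ENOMEM;
 *		return armpmu_register(pmu, -1);
 *	}
 *
 * Passing -1 as 'type' lets the perf core allocate a dynamic PMU type,
 * which is what the raw-event fast path in armpmu_map_event() keys off.
 */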