#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

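/*
 * Event mapping helpers. For PERF_TYPE_HW_CACHE events, the perf core
 * packs the generic cache descriptor into attr.config as three bytes:
 * cache type in bits 0-7, operation in bits 8-15 and result (access or
 * miss) in bits 16-23. The functions below unpack that encoding and
 * translate it, via the PMU-specific map tables, into a raw hardware
 * event number (or an error if the combination is unsupported).
 */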
static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

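/*
 * Note on the sampling scheme: the hardware counter is programmed with
 * the two's complement of the remaining period (-left), so it counts up
 * and overflows after 'left' increments. armpmu_event_set_period()
 * re-arms the counter and armpmu_event_update() folds the elapsed count
 * back into the perf event.
 */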
int
armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

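/*
 * Fold the delta since the last read of the hardware counter into
 * event->count. The local64_cmpxchg() retry loop below makes this safe
 * against a concurrent update of prev_count (for example from the
 * counter overflow interrupt) without taking a lock.
 */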
u64
armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
                    int idx)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void
armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event, hwc, hwc->idx);
        armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0);

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(hwc, idx);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

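/*
 * Event group validation: a throwaway pmu_hw_events containing only a
 * used_mask is populated and get_event_idx() is asked for a counter for
 * the group leader, each sibling and the new event in turn. If the whole
 * group cannot be scheduled onto the PMU at once, the event is rejected
 * at init time rather than failing later in pmu->add().
 */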
static int
validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;

        if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;
        DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(fake_used_mask, 0, sizeof(fake_used_mask));
        fake_pmu.used_mask = fake_used_mask;

        if (!validate_event(&fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(&fake_pmu, event))
                return -EINVAL;

        return 0;
}

static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu = (struct arm_pmu *) dev;
        struct platform_device *plat_device = armpmu->plat_device;
        struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

        return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = armpmu->plat_device;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        for (i = 0; i < irqs; ++i) {
                if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
                        continue;
                irq = platform_get_irq(pmu_device, i);
                if (irq >= 0)
                        free_irq(irq, armpmu);
        }

        pm_runtime_put_sync(&pmu_device->dev);
}

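/*
 * Note on IRQ management below: one interrupt is requested per possible
 * CPU, on the assumption that platform resource i is the counter
 * overflow IRQ for CPU i. Each line is pinned to its CPU with
 * irq_set_affinity() so that overflows are handled on the core that
 * owns the counters; a missing IRQ entry is simply skipped.
 */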
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
        struct arm_pmu_platdata *plat;
        irq_handler_t handle_irq;
        int i, err, irq, irqs;
        struct platform_device *pmu_device = armpmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        plat = dev_get_platdata(&pmu_device->dev);
        if (plat && plat->handle_irq)
                handle_irq = armpmu_platform_irq;
        else
                handle_irq = armpmu->handle_irq;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        pm_runtime_get_sync(&pmu_device->dev);

        for (i = 0; i < irqs; ++i) {
                err = 0;
                irq = platform_get_irq(pmu_device, i);
                if (irq < 0)
                        continue;

                /*
                 * If we have a single PMU interrupt that we can't shift,
                 * assume that we're running on a uniprocessor machine and
                 * continue. Otherwise, continue without this interrupt.
                 */
                if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                        pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                   irq, i);
                        continue;
                }

                err = request_irq(irq, handle_irq,
                                  IRQF_DISABLED | IRQF_NOBALANCING,
                                  "arm-pmu", armpmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        armpmu_release_hardware(armpmu);
                        return err;
                }

                cpumask_set_cpu(i, &armpmu->active_irqs);
        }

        return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping, err;

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!hwc->sample_period) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        return err;
}

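/*
 * The PMU interrupts are claimed lazily: the first event to be
 * initialised calls armpmu_reserve_hardware() and bumps active_events,
 * and hw_perf_event_destroy() releases the hardware again when the last
 * event is torn down.
 */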
static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        int err = 0;
        atomic_t *active_events = &armpmu->active_events;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&armpmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = armpmu_reserve_hardware(armpmu);

                if (!err)
                        atomic_inc(active_events);
                mutex_unlock(&armpmu->reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (enabled)
                armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        armpmu->stop();
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_resume)
                return plat->runtime_resume(dev);

        return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_suspend)
                return plat->runtime_suspend(dev);

        return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

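/*
 * armpmu_register() is the entry point for the CPU-specific backends: a
 * driver fills in an arm_pmu with its enable/disable, read_counter/
 * write_counter, get_event_idx and map_event callbacks (all used above)
 * and then registers the PMU with the perf core under its own name and
 * type.
 */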
static void __init armpmu_init(struct arm_pmu *armpmu)
{
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);

        armpmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
        };
}

int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
        armpmu_init(armpmu);
        pr_info("enabled with %s PMU driver, %d counters available\n",
                armpmu->name, armpmu->num_events);
        return perf_pmu_register(&armpmu->pmu, name, type);
}

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}
655 | } |