/* Performance counter support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf counter
 * code, which is:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_counter.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWCOUNTERS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1

#define PIC_UPPER_NOP			0x1c
#define PIC_LOWER_NOP			0x14

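/* Per-cpu bookkeeping: which perf_counter occupies each hardware
 * counter slot, which slots are allocated (used_mask) and which of
 * those are currently counting (active_mask).
 */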
struct cpu_hw_counters {
	struct perf_counter *counters[MAX_HWCOUNTERS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	int enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };

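/* Map of a generic perf hardware event to the chip's event encoding,
 * plus a mask saying which of the two counters can count it.
 */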
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

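/* Per-chip PMU description: the event map and the layout of the event
 * fields within the performance control register (PCR).
 */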
struct sparc_pmu {
	const struct perf_event_map *(*event_map)(int);
	int max_events;
	int upper_shift;
	int lower_shift;
	int event_mask;
	int hv_bit;
};

static const struct perf_event_map ultra3i_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3i_event_map(int event)
{
	return &ultra3i_perfmon_event_map[event];
}

static const struct sparc_pmu ultra3i_pmu = {
	.event_map = ultra3i_event_map,
	.max_events = ARRAY_SIZE(ultra3i_perfmon_event_map),
	.upper_shift = 11,
	.lower_shift = 4,
	.event_mask = 0x3f,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

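/* Shift a raw event encoding into the PCR field for the given
 * counter.  For example, with the ultra3i layout above, the
 * cache-miss encoding 0x0009 counted on the upper counter becomes
 * 0x0009 << 11 == 0x4800.
 */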
static u64 event_encoding(u64 event, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event <<= sparc_pmu->upper_shift;
	else
		event <<= sparc_pmu->lower_shift;
	return event;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      PIC_UPPER_NOP : PIC_LOWER_NOP, idx);
}

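/* Enabling a counter means writing its event encoding into the
 * relevant PCR event field; "disabling" it writes the do-nothing NOP
 * event there instead, since the hardware has no per-counter enable
 * bit (see the comment at the top of this file).
 */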
static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
					    int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
					     int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

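/* Globally start/stop counting on this cpu.  Enable ORs each assigned
 * counter's context enable bits (saved in config_base) into the PCR;
 * disable clears all of the user/supervisor/hypervisor trace bits.
 */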
void hw_perf_enable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	val = pcr_ops->read();

	for (i = 0; i < MAX_HWCOUNTERS; i++) {
		struct perf_counter *cp = cpuc->counters[i];
		struct hw_perf_counter *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}

	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE | sparc_pmu->hv_bit);
	pcr_ops->write(val);
}

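/* Both 32-bit counters live in the single 64-bit PIC register; select
 * the upper or lower half by index.
 */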
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}

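/* Program the counter with the negated period so that it overflows
 * after "left" events: e.g. a period of 0x1000 is written as
 * 0xfffff000, which makes the 0xffffffff -> 0 transition after
 * exactly 0x1000 increments.
 */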
static int sparc_perf_counter_set_period(struct perf_counter *counter,
					 struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_counter_update_userpage(counter);

	return ret;
}

static int sparc_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_counter(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_counter_set_period(counter, hwc, idx);
	sparc_pmu_enable_counter(hwc, idx);
	perf_counter_update_userpage(counter);
	return 0;
}

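/* Fold the hardware counter's progress into the perf_counter totals.
 * The cmpxchg loop guards against racing with an NMI updating
 * prev_count on this cpu; shifting the 32-bit raw values up and
 * arithmetically back down sign-extends the delta so that counter
 * wraparound is accounted correctly.
 */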
static u64 sparc_perf_counter_update(struct perf_counter *counter,
				     struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void sparc_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_counter(hwc, idx);

	barrier();

	sparc_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_counter_update_userpage(counter);
}

static void sparc_pmu_read(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_perf_counter_update(counter, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_pmu_enable_counter(hwc, hwc->idx);
}

static atomic_t active_counters = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

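/* The performance counters are also used by the NMI watchdog, so the
 * watchdog must be stopped on all cpus while any perf counters are
 * active, and restarted once the last counter is released.
 */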
void perf_counter_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_counters))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_counters) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_counters);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_counter_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	perf_counter_release_pmc();
}

static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	const struct perf_event_map *pmap;
	u64 enc;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;

	if (attr->config >= sparc_pmu->max_events)
		return -EINVAL;

	perf_counter_grab_pmc();
	counter->destroy = hw_perf_counter_destroy;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = 0;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	pmap = sparc_pmu->event_map(attr->config);

	enc = pmap->encoding;
	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

static const struct pmu pmu = {
	.enable = sparc_pmu_enable,
	.disable = sparc_pmu_disable,
	.read = sparc_pmu_read,
	.unthrottle = sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err = __hw_perf_counter_init(counter);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_counter_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

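/* Overflow NMI handler.  Every counter is examined, including the
 * inactive NOP-programmed ones, per the comment at the top of this
 * file.  Bit 31 still being set is taken to mean the counter has not
 * yet wrapped through 0xffffffff -> 0 and is skipped.
 */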
static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
					      unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.regs = regs;
	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
		struct perf_counter *counter = cpuc->counters[idx];
		struct hw_perf_counter *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &counter->hw;
		val = sparc_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = counter->hw.last_period;
		if (!sparc_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			sparc_pmu_disable_counter(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call = perf_counter_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3i")) {
		sparc_pmu = &ultra3i_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_counters(void)
{
	pr_info("Performance counters: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 counters.  But this simple
	 * driver only supports one active counter at a time.
	 */
	perf_max_counters = 1;

	register_die_notifier(&perf_counter_nmi_notifier);
}