/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32 bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register. The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits. But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters. It's thus nearly impossible to have one counter going
 * while keeping the other one stopped. Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events. These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
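
/* For instance (illustrative sketch only): with just the lower
 * counter in real use, the upper event field is programmed with the
 * sw_count1 encoding, so the idle counter merely counts occurrences
 * of the NOP encoding above and stays quiet in normal code.
 */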

#define MAX_HWEVENTS			2
#define MAX_PCRS			1
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event *event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on. See perf_event_encode() et al.
	 */
	unsigned long events[MAX_HWEVENTS];

	/* The current counter index assigned to an event. When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register(s) on this cpu. */
	u64 pcr[MAX_HWEVENTS];

	/* Enabled/disable state. */
	int enabled;

	unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event. In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}

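/* Illustrative round trip through the helpers above (sketch,
 * assuming "pmap" points at a populated map entry):
 *
 *	unsigned long val = perf_event_encode(pmap);
 *	u64 enc = perf_event_get_enc(val);	// == pmap->encoding
 *	u8 msk = perf_event_get_msk(val);	// == pmap->pic_mask
 */
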
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

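/* A cache_map_t is indexed by (cache, op, result); e.g. the L1D
 * read-miss entry of the active PMU is looked up as (illustrative):
 *
 *	pmap = &(*sparc_pmu->cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
 */
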
struct sparc_pmu {
	const struct perf_event_map *(*event_map)(int);
	const cache_map_t *cache_map;
	int max_events;
	u32 (*read_pmc)(int);
	void (*write_pmc)(int, u64);
	int upper_shift;
	int lower_shift;
	int event_mask;
	int user_bit;
	int priv_bit;
	int hv_bit;
	int irq_bit;
	int upper_nop;
	int lower_nop;
	unsigned int flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int max_hw_events;
	int num_pcrs;
	int num_pic_regs;
};

static u32 sparc_default_read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}

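/* Worked example (illustrative): the single 64-bit PIC packs the
 * upper counter into bits 63:32 and the lower into bits 31:0, so
 *
 *	sparc_default_write_pmc(PIC_UPPER_INDEX, 0x12345678);
 *
 * read-modify-writes only the top word and leaves the lower
 * counter's bits untouched.
 */
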
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map = ultra3_event_map,
	.cache_map = &ultra3_cache_map,
	.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 11,
	.lower_shift = 4,
	.event_mask = 0x3f,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.upper_nop = 0x1c,
	.lower_nop = 0x14,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

/* Niagara1 is very limited. The upper PIC is hard-locked to count
 * only instructions, so it is free running, which creates all kinds
 * of problems. Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map = niagara1_event_map,
	.cache_map = &niagara1_cache_map,
	.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 0,
	.lower_shift = 4,
	.event_mask = 0x7,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.upper_nop = 0x0,
	.lower_nop = 0x0,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map = niagara2_event_map,
	.cache_map = &niagara2_cache_map,
	.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 19,
	.lower_shift = 6,
	.event_mask = 0xfff,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.hv_bit = PCR_N2_HTRACE,
	.irq_bit = 0x30,
	.upper_nop = 0x220,
	.lower_nop = 0x220,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

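/* Worked example (illustrative): on ultra3, event_mask is 0x3f and
 * upper_shift is 11, so mask_for_index(PIC_UPPER_INDEX) yields
 * 0x3f << 11 == 0x1f800, i.e. the upper counter's event field in
 * the %pcr.
 */
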
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);
	int pcr_index = 0;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	int pcr_index = 0;
	u64 val;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= nop;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sparc_pmu->read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

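/* The shift trick above sign-extends a 32-bit difference so that
 * counter wraparound is handled; e.g. (illustrative) prev_raw_count
 * == 0xfffffffe and new_raw_count == 0x00000001 gives delta == 3
 * instead of a huge negative value.
 */
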
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

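/* Example (illustrative): for a sample_period of 100000 the counter
 * is programmed with (u64)(-100000) & 0xffffffff == 0xfffe7960, so
 * it wraps from 0xffffffff to 0 after exactly 100000 events and
 * raises the overflow interrupt.
 */
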
static void read_in_all_counters(struct cpu_hw_events *cpuc)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}
}

/* On this PMU all PICs are programmed using a single PCR. Calculate
 * the combined control register value.
 *
 * For such chips we require that all of the events have the same
 * configuration, so just fetch the settings from the first entry.
 */
static void calculate_single_pcr(struct cpu_hw_events *cpuc)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		cpuc->pcr[0] &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			cpuc->pcr[0] |= nop_for_index(idx);
		else
			cpuc->pcr[0] |= event_encoding(enc, idx);
	}
out:
	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}

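/* Net effect (illustrative sketch): a PERF_HES_STOPPED event keeps
 * its counter ticking on the harmless NOP encoding, a running event
 * gets its real event code, and the shared user/priv/hv enable bits
 * are taken from the first scheduled event's config_base.
 */
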
/* On this PMU each PIC has its own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		cpuc->pcr[idx] &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			cpuc->pcr[idx] |= nop_for_index(idx);
		else
			cpuc->pcr[idx] |= event_encoding(enc, idx);
	}
out:
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		int idx = cp->hw.idx;

		cpuc->pcr[idx] |= cp->hw.config_base;
	}
}

/* If performance event entries have been added, move existing events
 * around (if necessary) and then assign new entries to counters.
 */
static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_added)
		read_in_all_counters(cpuc);

	if (sparc_pmu->num_pcrs == 1) {
		calculate_single_pcr(cpuc);
	} else {
		calculate_multiple_pcrs(cpuc);
	}
}

static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events)
		update_pcrs_for_enable(cpuc);

	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		pcr_ops->write_pcr(i, cpuc->pcr[i]);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
		u64 val = cpuc->pcr[i];

		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
		cpuc->pcr[i] = val;
		pcr_ops->write_pcr(i, cpuc->pcr[i]);
	}
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	stop_nmi_watchdog(NULL);
	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		cpuc->pcr[i] = pcr_ops->read_pcr(i);
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

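/* Lifecycle sketch (illustrative): sparc_pmu_event_init() calls
 * perf_event_grab_pmc(), which stops the NMI watchdog when the first
 * event is created; hw_perf_event_destroy() balances this through
 * perf_event_release_pmc(), restarting the watchdog once the last
 * event is gone.
 */
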
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time. This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success. These are pending indexes. When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > sparc_pmu->max_hw_events)
		return -1;

	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
		int i;

		for (i = 0; i < n_ev; i++)
			evts[i]->hw.idx = i;
		return 0;
	}

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict. */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

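/* Worked example (illustrative): on ultra3, CACHE_REFERENCES is
 * limited to PIC_LOWER and CACHE_MISSES to PIC_UPPER, so scheduling
 * the two together hits the "fixed to different counters" case and
 * succeeds with evts[0]->hw.idx == 1 (lower) and evts[1]->hw.idx
 * == 0 (upper).
 */
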
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= sparc_pmu->max_hw_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If a group events scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		pmap = NULL;
		break;

	default:
		return -ENOENT;
	}

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/*
		 * User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
		 */
		hwc->event_base = attr->config;
	}

	/* We save the enable bits in the config_base. */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= sparc_pmu->user_bit;
	if (!attr->exclude_kernel)
		hwc->config_base |= sparc_pmu->priv_bit;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   sparc_pmu->max_hw_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

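/* Illustrative raw-event sketch (an assumption based on the comment
 * above, not taken from the original file): ultra3 CPU cycles are
 * encoding 0x0000 with pic_mask 0x3, so a PERF_TYPE_RAW user could
 * pass attr.config == (0x0000 << 16) | 0x3 == 0x3, e.g. via
 * "perf stat -e r3".
 */
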
/*
 * Start a group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test; it will be performed at commit time.
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop a group events scheduling transaction.
 * Clear the flag, and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit a group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 on success.
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

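/* Rough sketch of how the core is expected to drive these hooks for
 * a group (illustrative, not from the original file):
 *
 *	pmu->start_txn(pmu);
 *	pmu->add(leader, PERF_EF_START);   // no per-event checks ...
 *	pmu->add(sibling, PERF_EF_START);  // ... while PERF_EVENT_TXN is set
 *	if (pmu->commit_txn(pmu))          // whole-group check happens here
 *		pmu->cancel_txn(pmu);
 */
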
static struct pmu pmu = {
	.pmu_enable = sparc_pmu_enable,
	.pmu_disable = sparc_pmu_disable,
	.event_init = sparc_pmu_event_init,
	.add = sparc_pmu_add,
	.del = sparc_pmu_del,
	.start = sparc_pmu_start,
	.stop = sparc_pmu_stop,
	.read = sparc_pmu_read,
	.start_txn = sparc_pmu_start_txn,
	.cancel_txn = sparc_pmu_cancel_txn,
	.commit_txn = sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	int cpu, i;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pr_info("\n");
	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		pr_info("CPU#%d: PCR%d[%016llx]\n",
			cpu, i, pcr_ops->read_pcr(i));
	for (i = 0; i < sparc_pmu->num_pic_regs; i++)
		pr_info("CPU#%d: PIC%d[%016llx]\n",
			cpu, i, pcr_ops->read_pic(i));

	local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit &&
	    sparc_pmu->num_pcrs == 1)
		pcr_ops->write_pcr(0, cpuc->pcr[0]);

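	/* The counters were programmed with -left (see
	 * sparc_perf_event_set_period()), so for periods below 2^31 a
	 * value that still has bit 31 set has not yet wrapped through
	 * 0xffffffff -> 0; such counters are skipped below as "no
	 * overflow yet".
	 */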
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		if (sparc_pmu->irq_bit &&
		    sparc_pmu->num_pcrs > 1)
			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call = perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}