]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/mips/oprofile/op_model_mipsxx.c
Merge branches 'for-4.11/upstream-fixes', 'for-4.12/accutouch', 'for-4.12/cp2112...
[mirror_ubuntu-artful-kernel.git] / arch / mips / oprofile / op_model_mipsxx.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
7 * Copyright (C) 2005 by MIPS Technologies, Inc.
8 */
9 #include <linux/cpumask.h>
10 #include <linux/oprofile.h>
11 #include <linux/interrupt.h>
12 #include <linux/smp.h>
13 #include <asm/irq_regs.h>
14 #include <asm/time.h>
15
16 #include "op_impl.h"
17
/* Build the event-select field of a counter control word: shift the event
   number into place and mask it to the field width. */
#define M_PERFCTL_EVENT(event)	(((event) << MIPS_PERFCTRL_EVENT_S) & \
				 MIPS_PERFCTRL_EVENT)
/* Place a VPE id into the VPEID field of a counter control word. */
#define M_PERFCTL_VPEID(vpe)	((vpe) << MIPS_PERFCTRL_VPEID_S)

/* Bit 31 of a counter register: set once the 32-bit counter overflows. */
#define M_COUNTER_OVERFLOW	(1UL << 31)

/* Previous perf_irq handler; restored in mipsxx_exit() and also used as the
 * shared-IRQ dev_id cookie for request_irq()/free_irq(). */
static int (*save_perf_irq)(void);
/* IRQ number for counter overflow interrupts, or -1 when none is wired. */
static int perfcount_irq;
26
/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
#else
/* All other configurations program the counters on every CPU. */
#define oprofile_skip_cpu(c) 0
#endif
37
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when every TC has its own counters (Config7 probe in init). */
static int cpu_has_mipsmt_pertccounters;
/*
 * MT control bits ORed into a counter control word when starting a
 * counter: count events for this VPE only and tag the counter with the
 * current CPU's VPE id.
 */
#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
	      M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/*
 * Which per-core counter bank this CPU accesses: with per-TC counters
 * everyone uses bank 0, otherwise the bank is the VPE id.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE. There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcore a few things here for the moment. The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

/* Non-MT configuration: no extra MT control bits, single counter bank. */
#define WHAT 0
#define vpe_id() 0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
72
/* Convert a whole-core counter count into the count visible to one CPU. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters >> shift;
}
77
/* Convert a per-CPU counter count back into the whole-core count. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters << shift;
}
82
/*
 * Generate read (r_c0_*) and write (w_c0_*) accessors for logical
 * counter/control register n.  On bank 0 (vpe_id() == 0) logical
 * register n maps to physical register n; on bank 1 it maps to physical
 * register np.  Any other bank id is a BUG().
 */
#define __define_perf_accessors(r, n, np)	\
						\
static inline unsigned int r_c0_ ## r ## n(void)	\
{						\
	unsigned int cpu = vpe_id();		\
						\
	switch (cpu) {				\
	case 0:					\
		return read_c0_ ## r ## n();	\
	case 1:					\
		return read_c0_ ## r ## np();	\
	default:				\
		BUG();				\
	}					\
	return 0;				\
}						\
						\
static inline void w_c0_ ## r ## n(unsigned int value)	\
{						\
	unsigned int cpu = vpe_id();		\
						\
	switch (cpu) {				\
	case 0:					\
		write_c0_ ## r ## n(value);	\
		return;				\
	case 1:					\
		write_c0_ ## r ## np(value);	\
		return;				\
	default:				\
		BUG();				\
	}					\
	return;					\
}						\

/* Bank 0 maps logical 0-3 to physical 0-3; bank 1 maps 0,1,2,3 to
 * physical 2,3,0,1. */
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
126
/* Forward declaration; the ops table is filled in at the end of the file. */
struct op_mips_model op_model_mipsxx_ops;

/* Cached control words and counter preload values, computed by
 * mipsxx_reg_setup() and programmed into hardware by the cpu_* hooks. */
static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;
133
134 /* Compute all of the registers in preparation for enabling profiling. */
135
136 static void mipsxx_reg_setup(struct op_counter_config *ctr)
137 {
138 unsigned int counters = op_model_mipsxx_ops.num_counters;
139 int i;
140
141 /* Compute the performance counter control word. */
142 for (i = 0; i < counters; i++) {
143 reg.control[i] = 0;
144 reg.counter[i] = 0;
145
146 if (!ctr[i].enabled)
147 continue;
148
149 reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
150 MIPS_PERFCTRL_IE;
151 if (ctr[i].kernel)
152 reg.control[i] |= MIPS_PERFCTRL_K;
153 if (ctr[i].user)
154 reg.control[i] |= MIPS_PERFCTRL_U;
155 if (ctr[i].exl)
156 reg.control[i] |= MIPS_PERFCTRL_EXL;
157 if (boot_cpu_type() == CPU_XLR)
158 reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
159 reg.counter[i] = 0x80000000 - ctr[i].count;
160 }
161 }
162
163 /* Program all of the registers in preparation for enabling profiling. */
164
165 static void mipsxx_cpu_setup(void *args)
166 {
167 unsigned int counters = op_model_mipsxx_ops.num_counters;
168
169 if (oprofile_skip_cpu(smp_processor_id()))
170 return;
171
172 switch (counters) {
173 case 4:
174 w_c0_perfctrl3(0);
175 w_c0_perfcntr3(reg.counter[3]);
176 case 3:
177 w_c0_perfctrl2(0);
178 w_c0_perfcntr2(reg.counter[2]);
179 case 2:
180 w_c0_perfctrl1(0);
181 w_c0_perfcntr1(reg.counter[1]);
182 case 1:
183 w_c0_perfctrl0(0);
184 w_c0_perfcntr0(reg.counter[0]);
185 }
186 }
187
188 /* Start all counters on current CPU */
189 static void mipsxx_cpu_start(void *args)
190 {
191 unsigned int counters = op_model_mipsxx_ops.num_counters;
192
193 if (oprofile_skip_cpu(smp_processor_id()))
194 return;
195
196 switch (counters) {
197 case 4:
198 w_c0_perfctrl3(WHAT | reg.control[3]);
199 case 3:
200 w_c0_perfctrl2(WHAT | reg.control[2]);
201 case 2:
202 w_c0_perfctrl1(WHAT | reg.control[1]);
203 case 1:
204 w_c0_perfctrl0(WHAT | reg.control[0]);
205 }
206 }
207
208 /* Stop all counters on current CPU */
209 static void mipsxx_cpu_stop(void *args)
210 {
211 unsigned int counters = op_model_mipsxx_ops.num_counters;
212
213 if (oprofile_skip_cpu(smp_processor_id()))
214 return;
215
216 switch (counters) {
217 case 4:
218 w_c0_perfctrl3(0);
219 case 3:
220 w_c0_perfctrl2(0);
221 case 2:
222 w_c0_perfctrl1(0);
223 case 1:
224 w_c0_perfctrl0(0);
225 }
226 }
227
/*
 * Common overflow handler, hooked via perf_irq and the dedicated IRQ
 * wrapper.  For each implemented counter (highest first, via switch
 * fall-through) record an oprofile sample if the counter has its
 * interrupt enabled and has overflowed, then reload its preload value.
 * Returns IRQ_HANDLED if any counter was serviced, IRQ_NONE otherwise.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2 cores Cause.PCI indicates a performance counter
	 * interrupt; if it is clear, this interrupt was not ours. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	switch (counters) {
/* Expands to a "case n + 1:" label so that "counters" selects the
 * highest implemented counter and execution falls through to check
 * every lower-numbered one as well. */
#define HANDLE_COUNTER(n)					\
	case n + 1:						\
		control = r_c0_perfctrl ## n();			\
		counter = r_c0_perfcntr ## n();			\
		if ((control & MIPS_PERFCTRL_IE) &&		\
		    (counter & M_COUNTER_OVERFLOW)) {		\
			oprofile_add_sample(get_irq_regs(), n);	\
			w_c0_perfcntr ## n(reg.counter[n]);	\
			handled = IRQ_HANDLED;			\
		}
	HANDLE_COUNTER(3)	/* fall through */
	HANDLE_COUNTER(2)	/* fall through */
	HANDLE_COUNTER(1)	/* fall through */
	HANDLE_COUNTER(0)
	}

	return handled;
}
257
/*
 * Probe how many performance counters the CPU implements by walking
 * the control registers: a clear M bit in perfctrl N means no counter
 * follows counter N.  Returns 0 when the CPU has no counters at all.
 */
static inline int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}
271
272 static inline int n_counters(void)
273 {
274 int counters;
275
276 switch (current_cpu_type()) {
277 case CPU_R10000:
278 counters = 2;
279 break;
280
281 case CPU_R12000:
282 case CPU_R14000:
283 case CPU_R16000:
284 counters = 4;
285 break;
286
287 default:
288 counters = __n_counters();
289 }
290
291 return counters;
292 }
293
/*
 * Zero the control and count registers of every implemented counter.
 * The counter count (at most 4) is smuggled through the void * argument
 * so this can run on each CPU via on_each_cpu().
 */
static void reset_counters(void *arg)
{
	int nr = (int)(long)arg;

	if (nr >= 4) {
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
	}
	if (nr >= 3) {
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
	}
	if (nr >= 2) {
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
	}
	if (nr >= 1) {
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
312
/* IRQ handler wrapper: delegate to the common overflow handler and
 * propagate its IRQ_HANDLED/IRQ_NONE result. */
static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
	return mipsxx_perfcount_handler();
}
317
/*
 * One-time model initialisation: probe the counter count, reset all
 * counters on every CPU, map the CPU type to the name oprofile
 * userspace expects, hook perf_irq and request the overflow interrupt
 * when one is wired up.  Returns 0 on success, -ENODEV when the CPU
 * has no counters or is not in the supported list, or the request_irq()
 * error code.
 */
static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	/* Config7 bit 19: per-TC performance counters, presumably —
	 * TODO confirm against the core's datasheet.  Without per-TC
	 * counters the core's set is shared, so scale the total down to
	 * what each VPE can use. */
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	switch (current_cpu_type()) {
	case CPU_M14KC:
		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
		break;

	case CPU_M14KEC:
		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
		break;

	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;

	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;

	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;

	case CPU_1004K:
	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;

	case CPU_1074K:
	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;

	case CPU_INTERAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
		break;

	case CPU_PROAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
		break;

	case CPU_P5600:
		op_model_mipsxx_ops.cpu_type = "mips/P5600";
		break;

	case CPU_I6400:
		op_model_mipsxx_ops.cpu_type = "mips/I6400";
		break;

	case CPU_M5150:
		op_model_mipsxx_ops.cpu_type = "mips/M5150";
		break;

	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;

	case CPU_R10000:
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;

	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;

	case CPU_R16000:
		op_model_mipsxx_ops.cpu_type = "mips/r16000";
		break;

	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;

	case CPU_LOONGSON1:
		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
		break;

	case CPU_XLR:
		op_model_mipsxx_ops.cpu_type = "mips/xlr";
		break;

	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");

		return -ENODEV;
	}

	/* Chain onto the low-level counter interrupt hook, remembering
	 * the previous handler so mipsxx_exit() can restore it. */
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	/* Prefer a platform-provided perfcount IRQ, then fall back to
	 * the CP0 interrupt line; -1 means no dedicated interrupt. */
	if (get_c0_perfcount_int)
		perfcount_irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		perfcount_irq = -1;

	/* IRQF_SHARED requires a unique non-NULL dev_id; save_perf_irq
	 * serves as that cookie and must match the free_irq() call in
	 * mipsxx_exit(). */
	if (perfcount_irq >= 0)
		return request_irq(perfcount_irq, mipsxx_perfcount_int,
				   IRQF_PERCPU | IRQF_NOBALANCING |
				   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				   IRQF_SHARED,
				   "Perfcounter", save_perf_irq);

	return 0;
}
445
/*
 * Teardown: release the overflow IRQ (if one was requested), zero all
 * hardware counters on every CPU, and restore the previous perf_irq
 * handler saved by mipsxx_init().
 */
static void mipsxx_exit(void)
{
	int counters = op_model_mipsxx_ops.num_counters;

	if (perfcount_irq >= 0)
		free_irq(perfcount_irq, save_perf_irq);

	/* Undo the per-VPE scaling applied in mipsxx_init() so every
	 * physical counter in the core gets reset. */
	counters = counters_per_cpu_to_total(counters);
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	perf_irq = save_perf_irq;
}
458
/* oprofile model operations for mipsxx-style performance counters. */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};