]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/powerpc/oprofile/op_model_cell.c
cpufreq: Do not allow ->setpolicy drivers to provide ->target
[mirror_ubuntu-bionic-kernel.git] / arch / powerpc / oprofile / op_model_cell.c
CommitLineData
18f2190d
MJ
1/*
2 * Cell Broadband Engine OProfile Support
3 *
4 * (C) Copyright IBM Corporation 2006
5 *
6 * Author: David Erb (djerb@us.ibm.com)
7 * Modifications:
1474855d
BN
8 * Carl Love <carll@us.ibm.com>
9 * Maynard Johnson <maynardj@us.ibm.com>
18f2190d
MJ
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/cpufreq.h>
18#include <linux/delay.h>
18f2190d
MJ
19#include <linux/jiffies.h>
20#include <linux/kthread.h>
21#include <linux/oprofile.h>
22#include <linux/percpu.h>
23#include <linux/smp.h>
24#include <linux/spinlock.h>
25#include <linux/timer.h>
26#include <asm/cell-pmu.h>
27#include <asm/cputable.h>
28#include <asm/firmware.h>
29#include <asm/io.h>
30#include <asm/oprofile_impl.h>
31#include <asm/processor.h>
32#include <asm/prom.h>
33#include <asm/ptrace.h>
34#include <asm/reg.h>
35#include <asm/rtas.h>
eef686a0 36#include <asm/cell-regs.h>
18f2190d
MJ
37
38#include "../platforms/cell/interrupt.h"
1474855d
BN
39#include "cell/pr_util.h"
40
9b93418e
CL
41#define PPU_PROFILING 0
42#define SPU_PROFILING_CYCLES 1
43#define SPU_PROFILING_EVENTS 2
1474855d 44
88382329
CL
45#define SPU_EVENT_NUM_START 4100
46#define SPU_EVENT_NUM_STOP 4399
47#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
48#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
49#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
50
1474855d
BN
51#define NUM_SPUS_PER_NODE 8
52#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
18f2190d
MJ
53
54#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
1474855d
BN
55#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
56 * PPU_CYCLES event
57 */
58#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
18f2190d 59
bcb63e25
CL
60#define NUM_THREADS 2 /* number of physical threads in
61 * physical processor
62 */
a1ef4849 63#define NUM_DEBUG_BUS_WORDS 4
bcb63e25
CL
64#define NUM_INPUT_BUS_WORDS 2
65
1474855d 66#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
18f2190d 67
25985edc 68/* Minimum HW interval timer setting to send value to trace buffer is 10 cycle.
88382329
CL
69 * To configure counter to send value every N cycles set counter to
70 * 2^32 - 1 - N.
71 */
72#define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
73
9b93418e
CL
74/*
75 * spu_cycle_reset is the number of cycles between samples.
76 * This variable is used for SPU profiling and should ONLY be set
77 * at the beginning of cell_reg_setup; otherwise, it's read-only.
78 */
79static unsigned int spu_cycle_reset;
80static unsigned int profiling_mode;
88382329 81static int spu_evnt_phys_spu_indx;
9b93418e 82
18f2190d
MJ
83struct pmc_cntrl_data {
84 unsigned long vcntr;
85 unsigned long evnts;
86 unsigned long masks;
87 unsigned long enabled;
88};
89
90/*
91 * ibm,cbe-perftools rtas parameters
92 */
18f2190d
MJ
93struct pm_signal {
94 u16 cpu; /* Processor to modify */
1474855d
BN
95 u16 sub_unit; /* hw subunit this applies to (if applicable)*/
96 short int signal_group; /* Signal Group to Enable/Disable */
18f2190d
MJ
97 u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
98 * Bus Word(s) (bitmask)
99 */
100 u8 bit; /* Trigger/Event bit (if applicable) */
101};
102
103/*
104 * rtas call arguments
105 */
106enum {
107 SUBFUNC_RESET = 1,
108 SUBFUNC_ACTIVATE = 2,
109 SUBFUNC_DEACTIVATE = 3,
110
111 PASSTHRU_IGNORE = 0,
112 PASSTHRU_ENABLE = 1,
113 PASSTHRU_DISABLE = 2,
114};
115
116struct pm_cntrl {
117 u16 enable;
118 u16 stop_at_max;
119 u16 trace_mode;
120 u16 freeze;
121 u16 count_mode;
88382329
CL
122 u16 spu_addr_trace;
123 u8 trace_buf_ovflw;
18f2190d
MJ
124};
125
126static struct {
127 u32 group_control;
128 u32 debug_bus_control;
129 struct pm_cntrl pm_cntrl;
130 u32 pm07_cntrl[NR_PHYS_CTRS];
131} pm_regs;
132
18f2190d
MJ
133#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
134#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
135#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
136#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
137#define GET_COUNT_CYCLES(x) (x & 0x00000001)
138#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
139
18f2190d 140static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
88382329 141static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
18f2190d
MJ
142static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
143
1474855d
BN
144/*
145 * The CELL profiling code makes rtas calls to setup the debug bus to
146 * route the performance signals. Additionally, SPU profiling requires
147 * a second rtas call to setup the hardware to capture the SPU PCs.
148 * The EIO error value is returned if the token lookups or the rtas
149 * call fail. The EIO error number is the best choice of the existing
150 * error numbers. The probability of rtas related error is very low. But
151 * by returning EIO and printing additional information to dmesg the user
152 * will know that OProfile did not start and dmesg will tell them why.
153 * OProfile does not support returning errors on Stop. Not a huge issue
154 * since failure to reset the debug bus or stop the SPU PC collection is
155 * not a fatal issue. Chances are if the Stop failed, Start doesn't work
156 * either.
157 */
158
159/*
160 * Interpretation of hdw_thread:
18f2190d
MJ
161 * 0 - even virtual cpus 0, 2, 4,...
162 * 1 - odd virtual cpus 1, 3, 5, ...
1474855d
BN
163 *
164 * FIXME: this is strictly wrong, we need to clean this up in a number
165 * of places. It works for now. -arnd
18f2190d
MJ
166 */
167static u32 hdw_thread;
168
169static u32 virt_cntr_inter_mask;
170static struct timer_list timer_virt_cntr;
88382329 171static struct timer_list timer_spu_event_swap;
18f2190d 172
1474855d
BN
173/*
174 * pm_signal needs to be global since it is initialized in
18f2190d
MJ
175 * cell_reg_setup at the time when the necessary information
176 * is available.
177 */
178static struct pm_signal pm_signal[NR_PHYS_CTRS];
1474855d
BN
179static int pm_rtas_token; /* token for debug bus setup call */
180static int spu_rtas_token; /* token for SPU cycle profiling */
18f2190d
MJ
181
182static u32 reset_value[NR_PHYS_CTRS];
183static int num_counters;
184static int oprofile_running;
9b93418e 185static DEFINE_SPINLOCK(cntr_lock);
18f2190d
MJ
186
187static u32 ctr_enabled;
188
bcb63e25 189static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
18f2190d
MJ
190
191/*
192 * Firmware interface functions
193 */
194static int
195rtas_ibm_cbe_perftools(int subfunc, int passthru,
196 void *address, unsigned long length)
197{
198 u64 paddr = __pa(address);
199
1474855d
BN
200 return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
201 passthru, paddr >> 32, paddr & 0xffffffff, length);
18f2190d
MJ
202}
203
204static void pm_rtas_reset_signals(u32 node)
205{
206 int ret;
207 struct pm_signal pm_signal_local;
208
1474855d
BN
209 /*
210 * The debug bus is being set to the passthru disable state.
211 * However, the FW still expects atleast one legal signal routing
212 * entry or it will return an error on the arguments. If we don't
213 * supply a valid entry, we must ignore all return values. Ignoring
214 * all return values means we might miss an error we should be
215 * concerned about.
18f2190d
MJ
216 */
217
218 /* fw expects physical cpu #. */
219 pm_signal_local.cpu = node;
220 pm_signal_local.signal_group = 21;
221 pm_signal_local.bus_word = 1;
222 pm_signal_local.sub_unit = 0;
223 pm_signal_local.bit = 0;
224
225 ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
226 &pm_signal_local,
227 sizeof(struct pm_signal));
228
1474855d
BN
229 if (unlikely(ret))
230 /*
231 * Not a fatal error. For Oprofile stop, the oprofile
232 * functions do not support returning an error for
233 * failure to stop OProfile.
234 */
18f2190d 235 printk(KERN_WARNING "%s: rtas returned: %d\n",
e48b1b45 236 __func__, ret);
18f2190d
MJ
237}
238
1474855d 239static int pm_rtas_activate_signals(u32 node, u32 count)
18f2190d
MJ
240{
241 int ret;
c7eb7347 242 int i, j;
18f2190d
MJ
243 struct pm_signal pm_signal_local[NR_PHYS_CTRS];
244
1474855d
BN
245 /*
246 * There is no debug setup required for the cycles event.
c7eb7347
MJ
247 * Note that only events in the same group can be used.
248 * Otherwise, there will be conflicts in correctly routing
06fe9fb4 249 * the signals on the debug bus. It is the responsibility
c7eb7347
MJ
250 * of the OProfile user tool to check the events are in
251 * the same group.
252 */
253 i = 0;
18f2190d 254 for (j = 0; j < count; j++) {
c7eb7347
MJ
255 if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
256
257 /* fw expects physical cpu # */
258 pm_signal_local[i].cpu = node;
259 pm_signal_local[i].signal_group
260 = pm_signal[j].signal_group;
261 pm_signal_local[i].bus_word = pm_signal[j].bus_word;
262 pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
263 pm_signal_local[i].bit = pm_signal[j].bit;
264 i++;
265 }
18f2190d
MJ
266 }
267
c7eb7347
MJ
268 if (i != 0) {
269 ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
270 pm_signal_local,
271 i * sizeof(struct pm_signal));
18f2190d 272
1474855d 273 if (unlikely(ret)) {
c7eb7347 274 printk(KERN_WARNING "%s: rtas returned: %d\n",
e48b1b45 275 __func__, ret);
1474855d
BN
276 return -EIO;
277 }
c7eb7347 278 }
1474855d
BN
279
280 return 0;
18f2190d
MJ
281}
282
/*
 * PM Signal functions
 */

/*
 * Program counter @ctr to count @event, and record the debug-bus
 * routing needed for that event in the global pm_signal[ctr] entry.
 *
 * Event numbers encode their signal group and bit: group = event/100,
 * bit = event%100. The routing details (bus word, bus type, polarity,
 * count-cycles, input control, sub unit) are packed into @unit_mask
 * and extracted with the GET_* macros above.
 *
 * Results are accumulated into the globals pm_regs (pm07_cntrl,
 * debug_bus_control, group_control) and input_bus[]; they are written
 * to the hardware later by cell_cpu_setup()/write_pm_cntrl().
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits, the input words to the performance
	 * counters are defined as 32 bits. Need to convert the 64 bit island
	 * specification to the appropriate 32 input bit and bus word for the
	 * performance counter event selection. See the CELL Performance
	 * monitoring signals manual and the Perf cntr hardware descriptions
	 * for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			/* fold the upper 32-bit half onto the lower bus word */
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		/* signal groups >= 50/60 use a different bus type encoding */
		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	/*
	 * Claim a free input-bus word (marked 0xff) for each debug-bus
	 * word this event uses, recording the mapping in group_control
	 * and the bus type in debug_bus_control (2 bits per word).
	 */
	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));

					break;
				}
			}
		}
	}
out:
	;
}
372
bcb63e25 373static void write_pm_cntrl(int cpu)
18f2190d 374{
1474855d
BN
375 /*
376 * Oprofile will use 32 bit counters, set bits 7:10 to 0
bcb63e25
CL
377 * pmregs.pm_cntrl is a global
378 */
379
18f2190d 380 u32 val = 0;
bcb63e25 381 if (pm_regs.pm_cntrl.enable == 1)
18f2190d
MJ
382 val |= CBE_PM_ENABLE_PERF_MON;
383
bcb63e25 384 if (pm_regs.pm_cntrl.stop_at_max == 1)
18f2190d
MJ
385 val |= CBE_PM_STOP_AT_MAX;
386
9b93418e 387 if (pm_regs.pm_cntrl.trace_mode != 0)
bcb63e25 388 val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
18f2190d 389
88382329
CL
390 if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
391 val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
bcb63e25 392 if (pm_regs.pm_cntrl.freeze == 1)
18f2190d
MJ
393 val |= CBE_PM_FREEZE_ALL_CTRS;
394
88382329
CL
395 val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
396
1474855d
BN
397 /*
398 * Routine set_count_mode must be called previously to set
18f2190d
MJ
399 * the count mode based on the user selection of user and kernel.
400 */
bcb63e25 401 val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
18f2190d
MJ
402 cbe_write_pm(cpu, pm_control, val);
403}
404
405static inline void
bcb63e25 406set_count_mode(u32 kernel, u32 user)
18f2190d 407{
1474855d
BN
408 /*
409 * The user must specify user and kernel if they want them. If
bcb63e25
CL
410 * neither is specified, OProfile will count in hypervisor mode.
411 * pm_regs.pm_cntrl is a global
18f2190d
MJ
412 */
413 if (kernel) {
414 if (user)
bcb63e25 415 pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
18f2190d 416 else
bcb63e25
CL
417 pm_regs.pm_cntrl.count_mode =
418 CBE_COUNT_SUPERVISOR_MODE;
18f2190d
MJ
419 } else {
420 if (user)
bcb63e25 421 pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
18f2190d 422 else
bcb63e25
CL
423 pm_regs.pm_cntrl.count_mode =
424 CBE_COUNT_HYPERVISOR_MODE;
18f2190d
MJ
425 }
426}
427
25ad2913 428static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
18f2190d
MJ
429{
430
bcb63e25 431 pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
18f2190d
MJ
432 cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
433}
434
/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine will alternate loading the virtual counters for
 * virtual CPUs. It runs from the timer_virt_cntr kernel timer and
 * re-arms itself every HZ/10 jiffies.
 */
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	/* rebuild the routing state from scratch for the new thread */
	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * There are some per thread events. Must do the
	 * set event, for the thread that is being started
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			pmc_cntrl[next_hdw_thread][i].evnts,
			pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted. This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value. If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated. Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread. Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	/* re-arm: swap threads again in HZ/10 jiffies */
	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
555
556static void start_virt_cntrs(void)
557{
558 init_timer(&timer_virt_cntr);
559 timer_virt_cntr.function = cell_virtual_cntr;
560 timer_virt_cntr.data = 0UL;
561 timer_virt_cntr.expires = jiffies + HZ / 10;
562 add_timer(&timer_virt_cntr);
563}
564
9b93418e 565static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
1474855d 566 struct op_system_config *sys, int num_ctrs)
18f2190d 567{
9b93418e 568 spu_cycle_reset = ctr[0].count;
1474855d
BN
569
570 /*
9b93418e
CL
571 * Each node will need to make the rtas call to start
572 * and stop SPU profiling. Get the token once and store it.
1474855d 573 */
9b93418e
CL
574 spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
575
576 if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
1474855d 577 printk(KERN_ERR
9b93418e 578 "%s: rtas token ibm,cbe-spu-perftools unknown\n",
e48b1b45 579 __func__);
1474855d 580 return -EIO;
18f2190d 581 }
9b93418e
CL
582 return 0;
583}
584
88382329
CL
/* Unfortunately, the hardware will only support event profiling
 * on one SPU per node at a time. Therefore, we must time slice
 * the profiling across all SPUs in the node. Note, we do this
 * in parallel for each node. The following routine is called
 * periodically based on kernel timer to switch which SPU is
 * being monitored in a round robin fashion.
 */
static void spu_evnt_swap(unsigned long data)
{
	int node;
	int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
	unsigned long flags;
	int cpu;
	int ret;
	u32 interrupt_mask;


	/* enable interrupts on cntr 0 */
	interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);

	hdw_thread = 0;

	/* Make sure spu event interrupt handler and spu event swap
	 * don't access the counters simultaneously.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	/* advance the round-robin index, wrapping at the node's SPU count */
	cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;

	if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
		spu_evnt_phys_spu_indx = 0;

	/* retarget the three debug-bus signals at the next SPU */
	pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* switch the SPU being profiled on each node */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		node = cbe_cpu_to_node(cpu);
		cur_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ cur_spu_evnt_phys_spu_indx;
		nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ spu_evnt_phys_spu_indx;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous physical SPU
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);

		spu_pm_cnt[cur_phys_spu]
			= cbe_read_ctr(cpu, 0);

		/* restore previous count for the next spu to sample */
		/* NOTE, hardware issue, counter will not start if the
		 * counter value is at max (0xFFFFFFFF).
		 */
		if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
		else
			cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);

		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* setup the debug bus measure the one event and
		 * the two events to route the next SPU's PC on
		 * the debug bus
		 */
		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
		if (ret)
			/* cannot return an error from a timer callback;
			 * log and carry on */
			printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
			       "SPU event swap\n", __func__);

		/* clear the trace buffer, don't want to take PC for
		 * previous SPU*/
		cbe_write_pm(cpu, trace_address, 0);

		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 interrupt_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	/* re-arm: swap again in HZ/25 jiffies (~40 ms) */
	mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
}
679
680static void start_spu_event_swap(void)
681{
682 init_timer(&timer_spu_event_swap);
683 timer_spu_event_swap.function = spu_evnt_swap;
684 timer_spu_event_swap.data = 0UL;
685 timer_spu_event_swap.expires = jiffies + HZ / 25;
686 add_timer(&timer_spu_event_swap);
687}
688
/*
 * Register setup for SPU event profiling. Called once for all nodes.
 *
 * Looks up the cbe-perftools RTAS token, configures the global
 * pm_regs trace settings, fills in the pm_signal[] entries that route
 * the SPU program counter onto the debug bus, and initializes the
 * per-SPU counts. Returns 0 on success, -EIO if the token is unknown.
 */
static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	int i;

	/* routine is called once for all nodes */

	spu_evnt_phys_spu_indx = 0;
	/*
	 * For all events except PPU CYCLEs, each node will need to make
	 * the rtas cbe-perftools call to setup and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	/* setup the pm_control register settings,
	 * settings will be written per node by the
	 * cell_cpu_setup() function.
	 */
	pm_regs.pm_cntrl.trace_buf_ovflw = 1;

	/* Use the occurrence trace mode to have SPU PC saved
	 * to the trace buffer. Occurrence data in trace buffer
	 * is not used. Bit 2 must be set to store SPU addresses.
	 */
	pm_regs.pm_cntrl.trace_mode = 2;

	pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
						  event 2 & 3 */

	/* setup the debug bus event array with the SPU PC routing events.
	 * Note, pm_signal[0] will be filled in by set_pm_event() call below.
	 */
	pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
	pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;

	pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
	pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* Set the user selected spu event to profile on,
	 * note, only one SPU profiling event is supported
	 */
	num_counters = 1;  /* Only support one SPU event at a time */
	set_pm_event(0, ctr[0].event, ctr[0].unit_mask);

	/* counters count up and interrupt on overflow */
	reset_value[0] = 0xFFFFFFFF - ctr[0].count;

	/* global, used by cell_cpu_setup */
	ctr_enabled |= 1;

	/* Initialize the count for each SPU to the reset value */
	for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
		spu_pm_cnt[i] = reset_value[0];

	return 0;
}
757
9b93418e
CL
/*
 * Register setup for PPU event profiling. Called once for all nodes.
 *
 * Records the user's event/mask/enable selections for hardware
 * thread 0, derives the equivalent thread-1 event numbers, computes
 * the counter reset values, and initializes the per-cpu saved counts
 * used by the virtual-counter multiplexing.
 *
 * Returns 0 on success, -EIO if more events were requested than there
 * are physical counters.
 */
static int cell_reg_setup_ppu(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	/* routine is called once for all nodes */
	int i, j, cpu;

	num_counters = num_ctrs;

	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
		printk(KERN_ERR
		       "%s: Oprofile, number of specified events " \
		       "exceeds number of physical counters\n",
		       __func__);
		return -EIO;
	}

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

	/* mark all input-bus words free; claimed by set_pm_event() */
	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow. So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}
839
1474855d 840
9b93418e
CL
/* This function is called once for all cpus combined.
 *
 * OProfile entry point for register setup: initializes the global
 * pm_regs defaults, looks up the cbe-perftools RTAS token, decides
 * the profiling mode from the first event number (SPU cycles, SPU
 * events, or PPU), and dispatches to the matching setup helper.
 * Returns 0 on success or -EIO on token-lookup/setup failure.
 */
static int cell_reg_setup(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	int ret=0;
	spu_cycle_reset = 0;

	/* initialize the spu_arr_trace value, will be reset if
	 * doing spu event profiling.
	 */
	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;
	pm_regs.pm_cntrl.trace_buf_ovflw = 0;
	pm_regs.pm_cntrl.spu_addr_trace = 0;

	/*
	 * For all events except PPU CYCLEs, each node will need to make
	 * the rtas cbe-perftools call to setup and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		profiling_mode = SPU_PROFILING_CYCLES;
		ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
	} else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
		   (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
		profiling_mode = SPU_PROFILING_EVENTS;
		spu_cycle_reset = ctr[0].count;

		/* for SPU event profiling, need to setup the
		 * pm_signal array with the events to route the
		 * SPU PC before making the FW call. Note, only
		 * one SPU event for profiling can be specified
		 * at a time.
		 */
		cell_reg_setup_spu_events(ctr, sys, num_ctrs);
	} else {
		profiling_mode = PPU_PROFILING;
		ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
	}

	return ret;
}
896
897
1474855d 898
18f2190d 899/* This function is called once for each cpu */
1474855d 900static int cell_cpu_setup(struct op_counter_config *cntr)
18f2190d
MJ
901{
902 u32 cpu = smp_processor_id();
903 u32 num_enabled = 0;
904 int i;
88382329 905 int ret;
18f2190d 906
9b93418e
CL
907 /* Cycle based SPU profiling does not use the performance
908 * counters. The trace array is configured to collect
909 * the data.
910 */
911 if (profiling_mode == SPU_PROFILING_CYCLES)
1474855d
BN
912 return 0;
913
18f2190d
MJ
914 /* There is one performance monitor per processor chip (i.e. node),
915 * so we only need to perform this function once per node.
916 */
917 if (cbe_get_hw_thread_id(cpu))
1474855d 918 return 0;
18f2190d
MJ
919
920 /* Stop all counters */
921 cbe_disable_pm(cpu);
922 cbe_disable_pm_interrupts(cpu);
923
18f2190d
MJ
924 cbe_write_pm(cpu, pm_start_stop, 0);
925 cbe_write_pm(cpu, group_control, pm_regs.group_control);
926 cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
bcb63e25 927 write_pm_cntrl(cpu);
18f2190d
MJ
928
929 for (i = 0; i < num_counters; ++i) {
930 if (ctr_enabled & (1 << i)) {
931 pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
932 num_enabled++;
933 }
934 }
935
1474855d
BN
936 /*
937 * The pm_rtas_activate_signals will return -EIO if the FW
938 * call failed.
939 */
88382329
CL
940 if (profiling_mode == SPU_PROFILING_EVENTS) {
941 /* For SPU event profiling also need to setup the
942 * pm interval timer
943 */
944 ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
945 num_enabled+2);
946 /* store PC from debug bus to Trace buffer as often
947 * as possible (every 10 cycles)
948 */
949 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
950 return ret;
951 } else
952 return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
953 num_enabled);
1474855d
BN
954}
955
#define ENTRIES 303
#define MAXLFSR 0xFFFFFF

/* precomputed table of 24 bit LFSR values */
/*
 * Lookup table mapping a sample-count index (see calculate_lfsr() below)
 * to a 24-bit LFSR seed.  Entry 0 corresponds to ~2^16 events between
 * samples; higher entries cover progressively larger counts in four
 * power-of-two ranges.  The last entry (0x7FFFFF) is the fallback for
 * out-of-range requests.  See the range/step comment following this
 * table for the exact layout.
 */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};
1000
1001/*
1002 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo random number sequence
1004 * where each number occurs once in the sequence but the sequence is not in
1005 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
1006 * the last value in the sequence. Hence the user specified value N
1007 * corresponds to the LFSR number that is N from the end of the sequence.
1008 *
1009 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
1010 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
1014 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
1015 * This is to prevent the loss of samples because the trace buffer is full.
1016 *
1017 * User specified N Step between Index in
1018 * precomputed values precomputed
1019 * table
1020 * 0 to 2^16-1 ---- 0
1021 * 2^16 to 2^16+2^19-1 2^12 1 to 128
1022 * 2^16+2^19 to 2^16+2^19+2^22-1 2^15 129 to 256
1023 * 2^16+2^19+2^22 to 2^24-1 2^18 257 to 302
1024 *
1025 *
1026 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
1028 * 1, 2,..., 127, 128.
1029 *
1030 * The 24 bit LFSR value for the nth number in the sequence can be
1031 * calculated using the following code:
1032 *
1033 * #define size 24
1034 * int calculate_lfsr(int n)
1035 * {
1036 * int i;
1037 * unsigned int newlfsr0;
1038 * unsigned int lfsr = 0xFFFFFF;
1039 * unsigned int howmany = n;
1040 *
1041 * for (i = 2; i < howmany + 2; i++) {
1042 * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
1043 * ((lfsr >> (size - 1 - 1)) & 1) ^
1044 * (((lfsr >> (size - 1 - 6)) & 1) ^
1045 * ((lfsr >> (size - 1 - 23)) & 1)));
1046 *
1047 * lfsr >>= 1;
1048 * lfsr = lfsr | (newlfsr0 << (size - 1));
1049 * }
1050 * return lfsr;
1051 * }
1052 */
1053
1054#define V2_16 (0x1 << 16)
1055#define V2_19 (0x1 << 19)
1056#define V2_22 (0x1 << 22)
1057
1058static int calculate_lfsr(int n)
1059{
1060 /*
1061 * The ranges and steps are in powers of 2 so the calculations
1062 * can be done using shifts rather then divide.
1063 */
1064 int index;
1065
1066 if ((n >> 16) == 0)
1067 index = 0;
1068 else if (((n - V2_16) >> 19) == 0)
1069 index = ((n - V2_16) >> 12) + 1;
1070 else if (((n - V2_16 - V2_19) >> 22) == 0)
1071 index = ((n - V2_16 - V2_19) >> 15 ) + 1 + 128;
1072 else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
1073 index = ((n - V2_16 - V2_19 - V2_22) >> 18 ) + 1 + 256;
1074 else
1075 index = ENTRIES-1;
1076
1077 /* make sure index is valid */
238c1a78 1078 if ((index >= ENTRIES) || (index < 0))
1474855d
BN
1079 index = ENTRIES-1;
1080
1081 return initial_lfsr[index];
1082}
1083
1084static int pm_rtas_activate_spu_profiling(u32 node)
1085{
1086 int ret, i;
210434d7 1087 struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
1474855d
BN
1088
1089 /*
1090 * Set up the rtas call to configure the debug bus to
1091 * route the SPU PCs. Setup the pm_signal for each SPU
1092 */
210434d7 1093 for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
1474855d
BN
1094 pm_signal_local[i].cpu = node;
1095 pm_signal_local[i].signal_group = 41;
1096 /* spu i on word (i/2) */
1097 pm_signal_local[i].bus_word = 1 << i / 2;
1098 /* spu i */
1099 pm_signal_local[i].sub_unit = i;
1100 pm_signal_local[i].bit = 63;
1101 }
1102
1103 ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
1104 PASSTHRU_ENABLE, pm_signal_local,
210434d7 1105 (ARRAY_SIZE(pm_signal_local)
1474855d
BN
1106 * sizeof(struct pm_signal)));
1107
1108 if (unlikely(ret)) {
1109 printk(KERN_WARNING "%s: rtas returned: %d\n",
e48b1b45 1110 __func__, ret);
1474855d
BN
1111 return -EIO;
1112 }
1113
1114 return 0;
1115}
1116
#ifdef CONFIG_CPU_FREQ
/*
 * cpufreq transition callback: keep the SPU sampling frequency in
 * step with the CPU clock.  Recompute when the clock is about to go
 * up, has just come down, or is being restored across suspend/resume.
 * Always returns 0 (notifier chain continues).
 */
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;
	int need_update;

	need_update = (val == CPUFREQ_PRECHANGE && freqs->old < freqs->new) ||
		      (val == CPUFREQ_POSTCHANGE && freqs->old > freqs->new) ||
		      (val == CPUFREQ_RESUMECHANGE ||
		       val == CPUFREQ_SUSPENDCHANGE);

	if (need_update)
		set_spu_profiling_frequency(freqs->new, spu_cycle_reset);

	return 0;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif
1134
9b93418e
CL
1135/*
1136 * Note the generic OProfile stop calls do not support returning
1137 * an error on stop. Hence, will not return an error if the FW
1138 * calls fail on stop. Failure to reset the debug bus is not an issue.
1139 * Failure to disable the SPU profiling is not an issue. The FW calls
1140 * to enable the performance counters and debug bus will work even if
1141 * the hardware was not cleanly reset.
1142 */
1143static void cell_global_stop_spu_cycles(void)
1144{
1145 int subfunc, rtn_value;
1146 unsigned int lfsr_value;
1147 int cpu;
1148
1149 oprofile_running = 0;
88382329 1150 smp_wmb();
9b93418e
CL
1151
1152#ifdef CONFIG_CPU_FREQ
1153 cpufreq_unregister_notifier(&cpu_freq_notifier_block,
1154 CPUFREQ_TRANSITION_NOTIFIER);
1155#endif
1156
1157 for_each_online_cpu(cpu) {
1158 if (cbe_get_hw_thread_id(cpu))
1159 continue;
1160
1161 subfunc = 3; /*
1162 * 2 - activate SPU tracing,
1163 * 3 - deactivate
1164 */
1165 lfsr_value = 0x8f100000;
1166
1167 rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
1168 subfunc, cbe_cpu_to_node(cpu),
1169 lfsr_value);
1170
1171 if (unlikely(rtn_value != 0)) {
1172 printk(KERN_ERR
1173 "%s: rtas call ibm,cbe-spu-perftools " \
1174 "failed, return = %d\n",
1175 __func__, rtn_value);
1176 }
1177
1178 /* Deactivate the signals */
1179 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1180 }
1181
88382329
CL
1182 stop_spu_profiling_cycles();
1183}
1184
1185static void cell_global_stop_spu_events(void)
1186{
1187 int cpu;
1188 oprofile_running = 0;
1189
1190 stop_spu_profiling_events();
1191 smp_wmb();
1192
1193 for_each_online_cpu(cpu) {
1194 if (cbe_get_hw_thread_id(cpu))
1195 continue;
1196
1197 cbe_sync_irq(cbe_cpu_to_node(cpu));
1198 /* Stop the counters */
1199 cbe_disable_pm(cpu);
1200 cbe_write_pm07_control(cpu, 0, 0);
1201
1202 /* Deactivate the signals */
1203 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1204
1205 /* Deactivate interrupts */
1206 cbe_disable_pm_interrupts(cpu);
1207 }
1208 del_timer_sync(&timer_spu_event_swap);
9b93418e
CL
1209}
1210
1211static void cell_global_stop_ppu(void)
1212{
1213 int cpu;
1214
1215 /*
1216 * This routine will be called once for the system.
1217 * There is one performance monitor per node, so we
1218 * only need to perform this function once per node.
1219 */
1220 del_timer_sync(&timer_virt_cntr);
1221 oprofile_running = 0;
1222 smp_wmb();
1223
1224 for_each_online_cpu(cpu) {
1225 if (cbe_get_hw_thread_id(cpu))
1226 continue;
1227
1228 cbe_sync_irq(cbe_cpu_to_node(cpu));
1229 /* Stop the counters */
1230 cbe_disable_pm(cpu);
1231
1232 /* Deactivate the signals */
1233 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1234
1235 /* Deactivate interrupts */
1236 cbe_disable_pm_interrupts(cpu);
1237 }
1238}
1239
1240static void cell_global_stop(void)
1241{
1242 if (profiling_mode == PPU_PROFILING)
1243 cell_global_stop_ppu();
88382329
CL
1244 else if (profiling_mode == SPU_PROFILING_EVENTS)
1245 cell_global_stop_spu_events();
9b93418e
CL
1246 else
1247 cell_global_stop_spu_cycles();
1248}
1249
/*
 * Start cycle-based SPU profiling.  Registers a cpufreq notifier (the
 * sample period is derived from the CPU clock), programs the LFSR
 * sample interval and debug bus on the first hardware thread of each
 * node, then asks firmware to activate SPU tracing.  On any failure
 * the already-programmed state is unwound via the goto labels.
 * Returns 0 on success or a negative errno.
 */
static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);

	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		/* one PMU per node: program only the first hw thread */
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}


		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, " \
			       "return = %d\n", __func__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu_cycles();	/* clean up the PMU/debug bus */
out:
	return rtas_error;
}
1339
88382329
CL
/*
 * Start SPU event profiling.  One performance counter per node counts
 * the selected SPU event; on counter overflow the interrupt handler
 * extracts the SPU program counter from the hardware trace buffer.
 * A kernel timer round-robins the profiled SPU across the node.
 * Always returns 0.
 */
static int cell_global_start_spu_events(struct op_counter_config *ctr)
{
	int cpu;
	u32 interrupt_mask = 0;
	int rtn = 0;

	hdw_thread = 0;

	/* spu event profiling, uses the performance counters to generate
	 * an interrupt.  The hardware is setup to store the SPU program
	 * counter into the trace array.  The occurrence mode is used to
	 * enable storing data to the trace buffer.  The bits are set
	 * to send/store the SPU address in the trace buffer.  The debug
	 * bus must be setup to route the SPU program counter onto the
	 * debug bus.  The occurrence data in the trace buffer is not used.
	 */

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU event-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 *
		 * Only support one SPU event on one SPU per node.
		 */
		if (ctr_enabled & 1) {
			cbe_write_ctr(cpu, 0, reset_value[0]);
			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
			interrupt_mask |=
				CBE_PM_CTR_OVERFLOW_INTR(0);
		} else {
			/* Disable counter */
			cbe_write_pm07_control(cpu, 0, 0);
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);

		/* clear the trace buffer */
		cbe_write_pm(cpu, trace_address, 0);
	}

	/* Start the timer to time slice collecting the event profile
	 * on each of the SPUs.  Note, can collect profile on one SPU
	 * per node at a time.
	 */
	start_spu_event_swap();
	start_spu_profiling_events();
	oprofile_running = 1;
	smp_wmb();

	return rtn;
}
1402
1474855d 1403static int cell_global_start_ppu(struct op_counter_config *ctr)
18f2190d 1404{
1474855d 1405 u32 cpu, i;
18f2190d 1406 u32 interrupt_mask = 0;
18f2190d
MJ
1407
1408 /* This routine gets called once for the system.
1409 * There is one performance monitor per node, so we
1410 * only need to perform this function once per node.
1411 */
1412 for_each_online_cpu(cpu) {
1413 if (cbe_get_hw_thread_id(cpu))
1414 continue;
1415
1416 interrupt_mask = 0;
1417
1418 for (i = 0; i < num_counters; ++i) {
1419 if (ctr_enabled & (1 << i)) {
1420 cbe_write_ctr(cpu, i, reset_value[i]);
1421 enable_ctr(cpu, i, pm_regs.pm07_cntrl);
25006644 1422 interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
18f2190d
MJ
1423 } else {
1424 /* Disable counter */
1425 cbe_write_pm07_control(cpu, i, 0);
1426 }
1427 }
1428
bcb63e25 1429 cbe_get_and_clear_pm_interrupts(cpu);
18f2190d
MJ
1430 cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1431 cbe_enable_pm(cpu);
1432 }
1433
1434 virt_cntr_inter_mask = interrupt_mask;
1435 oprofile_running = 1;
1436 smp_wmb();
1437
1474855d
BN
1438 /*
1439 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
1440 * executed which manipulates the PMU. We start the "virtual counter"
18f2190d
MJ
1441 * here so that we do not need to synchronize access to the PMU in
1442 * the above for-loop.
1443 */
1444 start_virt_cntrs();
1474855d
BN
1445
1446 return 0;
18f2190d
MJ
1447}
1448
1474855d
BN
1449static int cell_global_start(struct op_counter_config *ctr)
1450{
9b93418e
CL
1451 if (profiling_mode == SPU_PROFILING_CYCLES)
1452 return cell_global_start_spu_cycles(ctr);
88382329
CL
1453 else if (profiling_mode == SPU_PROFILING_EVENTS)
1454 return cell_global_start_spu_events(ctr);
1474855d
BN
1455 else
1456 return cell_global_start_ppu(ctr);
1457}
1458
1474855d 1459
88382329
CL
1460/* The SPU interrupt handler
1461 *
1462 * SPU event profiling works as follows:
1463 * The pm_signal[0] holds the one SPU event to be measured. It is routed on
1464 * the debug bus using word 0 or 1. The value of pm_signal[1] and
1465 * pm_signal[2] contain the necessary events to route the SPU program
1466 * counter for the selected SPU onto the debug bus using words 2 and 3.
1467 * The pm_interval register is setup to write the SPU PC value into the
1468 * trace buffer at the maximum rate possible. The trace buffer is configured
1469 * to store the PCs, wrapping when it is full. The performance counter is
b595076a 1470 * initialized to the max hardware count minus the number of events, N, between
25985edc 1471 * samples. Once the N events have occurred, a HW counter overflow occurs
88382329
CL
1472 * causing the generation of a HW counter interrupt which also stops the
1473 * writing of the SPU PC values to the trace buffer. Hence the last PC
1474 * written to the trace buffer is the SPU PC that we want. Unfortunately,
1475 * we have to read from the beginning of the trace buffer to get to the
 * last value written. We just hope the PPU has nothing better to do than
1477 * service this interrupt. The PC for the specific SPU being profiled is
1478 * extracted from the trace buffer processed and stored. The trace buffer
1479 * is cleared, interrupts are cleared, the counter is reset to max - N.
1480 * A kernel timer is used to periodically call the routine spu_evnt_swap()
 * to switch to the next physical SPU in the node to profile in round robin
 * order. This way data is collected for all SPUs on the node. It does mean
 * that we need to use a relatively small value of N to ensure enough samples
 * on each SPU are collected, since each SPU is being profiled 1/8 of the time.
1485 * It may also be necessary to use a longer sample collection period.
1486 */
1487static void cell_handle_interrupt_spu(struct pt_regs *regs,
1488 struct op_counter_config *ctr)
1489{
1490 u32 cpu, cpu_tmp;
1491 u64 trace_entry;
1492 u32 interrupt_mask;
1493 u64 trace_buffer[2];
1494 u64 last_trace_buffer;
1495 u32 sample;
1496 u32 trace_addr;
1497 unsigned long sample_array_lock_flags;
1498 int spu_num;
1499 unsigned long flags;
1500
1501 /* Make sure spu event interrupt handler and spu event swap
1502 * don't access the counters simultaneously.
1503 */
1504 cpu = smp_processor_id();
1505 spin_lock_irqsave(&cntr_lock, flags);
1506
1507 cpu_tmp = cpu;
1508 cbe_disable_pm(cpu);
1509
1510 interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1511
1512 sample = 0xABCDEF;
1513 trace_entry = 0xfedcba;
1514 last_trace_buffer = 0xdeadbeaf;
1515
25006644 1516 if ((oprofile_running == 1) && (interrupt_mask != 0)) {
88382329
CL
1517 /* disable writes to trace buff */
1518 cbe_write_pm(cpu, pm_interval, 0);
1519
1520 /* only have one perf cntr being used, cntr 0 */
1521 if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
25006644 1522 && ctr[0].enabled)
88382329
CL
1523 /* The SPU PC values will be read
1524 * from the trace buffer, reset counter
1525 */
1526
1527 cbe_write_ctr(cpu, 0, reset_value[0]);
1528
1529 trace_addr = cbe_read_pm(cpu, trace_address);
1530
1531 while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
1532 /* There is data in the trace buffer to process
1533 * Read the buffer until you get to the last
1534 * entry. This is the value we want.
1535 */
1536
1537 cbe_read_trace_buffer(cpu, trace_buffer);
1538 trace_addr = cbe_read_pm(cpu, trace_address);
1539 }
1540
1541 /* SPU Address 16 bit count format for 128 bit
1542 * HW trace buffer is used for the SPU PC storage
1543 * HDR bits 0:15
1544 * SPU Addr 0 bits 16:31
1545 * SPU Addr 1 bits 32:47
1546 * unused bits 48:127
1547 *
1548 * HDR: bit4 = 1 SPU Address 0 valid
1549 * HDR: bit5 = 1 SPU Address 1 valid
1550 * - unfortunately, the valid bits don't seem to work
1551 *
1552 * Note trace_buffer[0] holds bits 0:63 of the HW
1553 * trace buffer, trace_buffer[1] holds bits 64:127
1554 */
1555
1556 trace_entry = trace_buffer[0]
1557 & 0x00000000FFFF0000;
1558
1559 /* only top 16 of the 18 bit SPU PC address
1560 * is stored in trace buffer, hence shift right
1561 * by 16 -2 bits */
1562 sample = trace_entry >> 14;
1563 last_trace_buffer = trace_buffer[0];
1564
1565 spu_num = spu_evnt_phys_spu_indx
1566 + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
1567
1568 /* make sure only one process at a time is calling
1569 * spu_sync_buffer()
1570 */
1571 spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
1572 sample_array_lock_flags);
1573 spu_sync_buffer(spu_num, &sample, 1);
1574 spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
1575 sample_array_lock_flags);
1576
1577 smp_wmb(); /* insure spu event buffer updates are written
1578 * don't want events intermingled... */
1579
1580 /* The counters were frozen by the interrupt.
1581 * Reenable the interrupt and restart the counters.
1582 */
1583 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1584 cbe_enable_pm_interrupts(cpu, hdw_thread,
1585 virt_cntr_inter_mask);
1586
1587 /* clear the trace buffer, re-enable writes to trace buff */
1588 cbe_write_pm(cpu, trace_address, 0);
1589 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1590
1591 /* The writes to the various performance counters only writes
1592 * to a latch. The new values (interrupt setting bits, reset
1593 * counter value etc.) are not copied to the actual registers
1594 * until the performance monitor is enabled. In order to get
af901ca1 1595 * this to work as desired, the performance monitor needs to
88382329
CL
1596 * be disabled while writing to the latches. This is a
1597 * HW design issue.
1598 */
1599 write_pm_cntrl(cpu);
1600 cbe_enable_pm(cpu);
1601 }
1602 spin_unlock_irqrestore(&cntr_lock, flags);
1603}
1604
9b93418e
CL
/*
 * PPU performance counter overflow handler.  Records one OProfile
 * sample per overflowed, enabled counter and rearms the PMU.
 * Serialized against the cell_virtual_cntr() timer via cntr_lock.
 */
static void cell_handle_interrupt_ppu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware. This
	 * is hardware specific.
	 */

	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt. When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xffffff0 to cause the interrupt to be regenerated.
	 */

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		/* one sample per overflowed, enabled counter */
		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine.  The virtual counter
		 * routine may have cleared the interrupts. Hence must
		 * use the virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only writes
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}
1677
1678static void cell_handle_interrupt(struct pt_regs *regs,
1679 struct op_counter_config *ctr)
1680{
1681 if (profiling_mode == PPU_PROFILING)
1682 cell_handle_interrupt_ppu(regs, ctr);
88382329
CL
1683 else
1684 cell_handle_interrupt_spu(regs, ctr);
18f2190d
MJ
1685}
1686
1474855d
BN
1687/*
1688 * This function is called from the generic OProfile
1689 * driver. When profiling PPUs, we need to do the
1690 * generic sync start; otherwise, do spu_sync_start.
1691 */
1692static int cell_sync_start(void)
1693{
9b93418e
CL
1694 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1695 (profiling_mode == SPU_PROFILING_EVENTS))
1474855d
BN
1696 return spu_sync_start();
1697 else
1698 return DO_GENERIC_SYNC;
1699}
1700
1701static int cell_sync_stop(void)
1702{
9b93418e
CL
1703 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1704 (profiling_mode == SPU_PROFILING_EVENTS))
1474855d
BN
1705 return spu_sync_stop();
1706 else
1707 return 1;
1708}
1709
18f2190d
MJ
/*
 * Cell-specific OProfile model: connects the generic powerpc OProfile
 * driver entry points to the Cell PMU setup/start/stop/interrupt
 * handlers defined in this file.
 */
struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};