arch/powerpc/oprofile/op_model_cell.c
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *         Carl Love <carll@us.ibm.com>
 *         Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

#define PPU_PROFILING        0
#define SPU_PROFILING_CYCLES 1
#define SPU_PROFILING_EVENTS 2

#define NUM_SPUS_PER_NODE    8
#define SPU_CYCLES_EVENT_NUM 2        /* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1        /* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM   1        /* special group number for identifying
                                       * PPU_CYCLES event
                                       */
#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2         /* number of physical threads in
                               * physical processor
                               */
#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF        /* maximum 24 bit LFSR value */

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;
static unsigned int profiling_mode;

struct pmc_cntrl_data {
        unsigned long vcntr;
        unsigned long evnts;
        unsigned long masks;
        unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
        u16 cpu;                /* Processor to modify */
        u16 sub_unit;           /* hw subunit this applies to (if applicable)*/
        short int signal_group; /* Signal Group to Enable/Disable */
        u8 bus_word;            /* Enable/Disable on this Trace/Trigger/Event
                                 * Bus Word(s) (bitmask)
                                 */
        u8 bit;                 /* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
        SUBFUNC_RESET = 1,
        SUBFUNC_ACTIVATE = 2,
        SUBFUNC_DEACTIVATE = 3,

        PASSTHRU_IGNORE = 0,
        PASSTHRU_ENABLE = 1,
        PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
        u16 enable;
        u16 stop_at_max;
        u16 trace_mode;
        u16 freeze;
        u16 count_mode;
};

static struct {
        u32 group_control;
        u32 debug_bus_control;
        struct pm_cntrl pm_cntrl;
        u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

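/*
 * Illustrative decode (value made up for this example, not taken from
 * the hardware manuals): a unit_mask of 0x00002031 yields sub_unit = 2,
 * bus_word = 0x3, bus_type = 0, polarity = 0, input_control = 0 and
 * count_cycles = 1 under the bit-field macros above.
 */
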
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/*
 * The CELL profiling code makes rtas calls to set up the debug bus to
 * route the performance signals. Additionally, SPU profiling requires
 * a second rtas call to set up the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * call fail. The EIO error number is the best choice of the existing
 * error numbers. The probability of an rtas-related error is very low. But
 * by returning EIO and printing additional information to dmesg the user
 * will know that OProfile did not start and dmesg will tell them why.
 * OProfile does not support returning errors on Stop. Not a huge issue
 * since failure to reset the debug bus or stop the SPU PC collection is
 * not a fatal issue. Chances are if the Stop failed, Start doesn't work
 * either.
 */

/*
 * Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4,...
 * 1 - odd virtual cpus 1, 3, 5, ...
 *
 * FIXME: this is strictly wrong, we need to clean this up in a number
 * of places. It works for now. -arnd
 */
static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;    /* token for debug bus setup call */
static int spu_rtas_token;   /* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
                       void *address, unsigned long length)
{
        u64 paddr = __pa(address);

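        /*
         * The 64 bit physical address of the argument buffer is split
         * into the high and low 32 bit words expected by the firmware
         * call below.
         */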
        return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
                         passthru, paddr >> 32, paddr & 0xffffffff, length);
}

static void pm_rtas_reset_signals(u32 node)
{
        int ret;
        struct pm_signal pm_signal_local;

        /*
         * The debug bus is being set to the passthru disable state.
         * However, the FW still expects at least one legal signal routing
         * entry or it will return an error on the arguments. If we don't
         * supply a valid entry, we must ignore all return values. Ignoring
         * all return values means we might miss an error we should be
         * concerned about.
         */

        /* fw expects physical cpu #. */
        pm_signal_local.cpu = node;
        pm_signal_local.signal_group = 21;
        pm_signal_local.bus_word = 1;
        pm_signal_local.sub_unit = 0;
        pm_signal_local.bit = 0;

        ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
                                     &pm_signal_local,
                                     sizeof(struct pm_signal));

        if (unlikely(ret))
                /*
                 * Not a fatal error. For OProfile stop, the oprofile
                 * functions do not support returning an error for
                 * failure to stop OProfile.
                 */
                printk(KERN_WARNING "%s: rtas returned: %d\n",
                       __func__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
        int ret;
        int i, j;
        struct pm_signal pm_signal_local[NR_PHYS_CTRS];

        /*
         * There is no debug setup required for the cycles event.
         * Note that only events in the same group can be used.
         * Otherwise, there will be conflicts in correctly routing
         * the signals on the debug bus. It is the responsibility
         * of the OProfile user tool to check the events are in
         * the same group.
         */
        i = 0;
        for (j = 0; j < count; j++) {
                if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

                        /* fw expects physical cpu # */
                        pm_signal_local[i].cpu = node;
                        pm_signal_local[i].signal_group
                                = pm_signal[j].signal_group;
                        pm_signal_local[i].bus_word = pm_signal[j].bus_word;
                        pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
                        pm_signal_local[i].bit = pm_signal[j].bit;
                        i++;
                }
        }

        if (i != 0) {
                ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
                                             pm_signal_local,
                                             i * sizeof(struct pm_signal));

                if (unlikely(ret)) {
                        printk(KERN_WARNING "%s: rtas returned: %d\n",
                               __func__, ret);
                        return -EIO;
                }
        }

        return 0;
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
        struct pm_signal *p;
        u32 signal_bit;
        u32 bus_word, bus_type, count_cycles, polarity, input_control;
        int j, i;

        if (event == PPU_CYCLES_EVENT_NUM) {
                /* Special Event: Count all cpu cycles */
                pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
                p = &(pm_signal[ctr]);
                p->signal_group = PPU_CYCLES_GRP_NUM;
                p->bus_word = 1;
                p->sub_unit = 0;
                p->bit = 0;
                goto out;
        } else {
                pm_regs.pm07_cntrl[ctr] = 0;
        }

        bus_word = GET_BUS_WORD(unit_mask);
        bus_type = GET_BUS_TYPE(unit_mask);
        count_cycles = GET_COUNT_CYCLES(unit_mask);
        polarity = GET_POLARITY(unit_mask);
        input_control = GET_INPUT_CONTROL(unit_mask);
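
        /*
         * The event number encodes the debug bus signal: event / 100 is
         * the signal group and event % 100 is the bit within that group.
         * For example, event 2119 selects bit 19 of signal group 21.
         */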
        signal_bit = (event % 100);

        p = &(pm_signal[ctr]);

        p->signal_group = event / 100;
        p->bus_word = bus_word;
        p->sub_unit = GET_SUB_UNIT(unit_mask);

        pm_regs.pm07_cntrl[ctr] = 0;
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

        /*
         * Some of the islands' signal selection is based on 64 bit words.
         * The debug bus words are 32 bits, the input words to the performance
         * counters are defined as 32 bits. Need to convert the 64 bit island
         * specification to the appropriate 32 bit input and bus word for the
         * performance counter event selection. See the CELL Performance
         * monitoring signals manual and the Perf cntr hardware descriptions
         * for the details.
         */
        if (input_control == 0) {
                if (signal_bit > 31) {
                        signal_bit -= 32;
                        if (bus_word == 0x3)
                                bus_word = 0x2;
                        else if (bus_word == 0xc)
                                bus_word = 0x8;
                }

                if ((bus_type == 0) && p->signal_group >= 60)
                        bus_type = 2;
                if ((bus_type == 1) && p->signal_group >= 50)
                        bus_type = 0;

                pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
        } else {
                pm_regs.pm07_cntrl[ctr] = 0;
                p->bit = signal_bit;
        }

        for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
                if (bus_word & (1 << i)) {
                        pm_regs.debug_bus_control |=
                                (bus_type << (30 - (2 * i)));

                        for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
                                if (input_bus[j] == 0xff) {
                                        input_bus[j] = i;
                                        pm_regs.group_control |=
                                                (i << (30 - (2 * j)));

                                        break;
                                }
                        }
                }
        }
out:
        ;
}

static void write_pm_cntrl(int cpu)
{
        /*
         * OProfile will use 32 bit counters, set bits 7:10 to 0.
         * pm_regs.pm_cntrl is a global.
         */
        u32 val = 0;

        if (pm_regs.pm_cntrl.enable == 1)
                val |= CBE_PM_ENABLE_PERF_MON;

        if (pm_regs.pm_cntrl.stop_at_max == 1)
                val |= CBE_PM_STOP_AT_MAX;

        if (pm_regs.pm_cntrl.trace_mode != 0)
                val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

        if (pm_regs.pm_cntrl.freeze == 1)
                val |= CBE_PM_FREEZE_ALL_CTRS;

        /*
         * Routine set_count_mode must be called previously to set
         * the count mode based on the user selection of user and kernel.
         */
        val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
        cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
        /*
         * The user must specify user and kernel if they want them. If
         * neither is specified, OProfile will count in hypervisor mode.
         * pm_regs.pm_cntrl is a global.
         */
        if (kernel) {
                if (user)
                        pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
                else
                        pm_regs.pm_cntrl.count_mode =
                                CBE_COUNT_SUPERVISOR_MODE;
        } else {
                if (user)
                        pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
                else
                        pm_regs.pm_cntrl.count_mode =
                                CBE_COUNT_HYPERVISOR_MODE;
        }
}

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
        pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
        cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * OProfile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine will alternate loading the virtual counters for
 * the two virtual CPUs.
 */
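/*
 * Illustrative example: on the node holding virtual cpus 0 and 1, the
 * loop below runs only on cpu 0 (the even thread); per_cpu(pmc_values, 0)
 * then holds the saved counts for hardware thread 0 and
 * per_cpu(pmc_values, 1) those for hardware thread 1.
 */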
static void cell_virtual_cntr(unsigned long data)
{
        int i, prev_hdw_thread, next_hdw_thread;
        u32 cpu;
        unsigned long flags;

        /*
         * Make sure that the interrupt_handler and the virt counter are
         * not both playing with the counters on the same node.
         */
        spin_lock_irqsave(&cntr_lock, flags);

        prev_hdw_thread = hdw_thread;

        /* switch the cpu handling the interrupts */
        hdw_thread = 1 ^ hdw_thread;
        next_hdw_thread = hdw_thread;

        pm_regs.group_control = 0;
        pm_regs.debug_bus_control = 0;

        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                input_bus[i] = 0xff;

        /*
         * There are some per thread events. Must do the
         * set event for the thread that is being started.
         */
        for (i = 0; i < num_counters; i++)
                set_pm_event(i,
                        pmc_cntrl[next_hdw_thread][i].evnts,
                        pmc_cntrl[next_hdw_thread][i].masks);

        /*
         * The following is done only once per each node, but
         * we need cpu #, not node #, to pass to the cbe_xxx functions.
         */
        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                /*
                 * stop counters, save counter values, restore counts
                 * for previous thread
                 */
                cbe_disable_pm(cpu);
                cbe_disable_pm_interrupts(cpu);
                for (i = 0; i < num_counters; i++) {
                        per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
                                = cbe_read_ctr(cpu, i);

                        if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
                            == 0xFFFFFFFF)
                                /* If the cntr value is 0xffffffff, we must
                                 * reset that to 0xfffffff0 when the current
                                 * thread is restarted. This will generate a
                                 * new interrupt and make sure that we never
                                 * restore the counters to the max value. If
                                 * the counters were restored to the max value,
                                 * they do not increment and no interrupts are
                                 * generated. Hence no more samples will be
                                 * collected on that cpu.
                                 */
                                cbe_write_ctr(cpu, i, 0xFFFFFFF0);
                        else
                                cbe_write_ctr(cpu, i,
                                              per_cpu(pmc_values,
                                                      cpu +
                                                      next_hdw_thread)[i]);
                }

                /*
                 * Switch to the other thread. Change the interrupt
                 * and control regs to be scheduled on the CPU
                 * corresponding to the thread to execute.
                 */
                for (i = 0; i < num_counters; i++) {
                        if (pmc_cntrl[next_hdw_thread][i].enabled) {
                                /*
                                 * There are some per thread events.
                                 * Must do the set event, enable_cntr
                                 * for each cpu.
                                 */
                                enable_ctr(cpu, i,
                                           pm_regs.pm07_cntrl);
                        } else {
                                cbe_write_pm07_control(cpu, i, 0);
                        }
                }

                /* Enable interrupts on the CPU thread that is starting */
                cbe_enable_pm_interrupts(cpu, next_hdw_thread,
                                         virt_cntr_inter_mask);
                cbe_enable_pm(cpu);
        }

        spin_unlock_irqrestore(&cntr_lock, flags);

        mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

static void start_virt_cntrs(void)
{
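        /* multiplex the hardware threads every HZ / 10 jiffies, i.e. 100 ms */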
        init_timer(&timer_virt_cntr);
        timer_virt_cntr.function = cell_virtual_cntr;
        timer_virt_cntr.data = 0UL;
        timer_virt_cntr.expires = jiffies + HZ / 10;
        add_timer(&timer_virt_cntr);
}

static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
                                     struct op_system_config *sys, int num_ctrs)
{
        spu_cycle_reset = ctr[0].count;

        /*
         * Each node will need to make the rtas call to start
         * and stop SPU profiling. Get the token once and store it.
         */
        spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

        if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
                       __func__);
                return -EIO;
        }
        return 0;
}

static int cell_reg_setup_ppu(struct op_counter_config *ctr,
                              struct op_system_config *sys, int num_ctrs)
{
        int i, j, cpu;

        num_counters = num_ctrs;

        if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
                printk(KERN_ERR
                       "%s: Oprofile, number of specified events "
                       "exceeds number of physical counters\n",
                       __func__);
                return -EIO;
        }
        pm_regs.group_control = 0;
        pm_regs.debug_bus_control = 0;

        /* setup the pm_control register */
        memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
        pm_regs.pm_cntrl.stop_at_max = 1;
        pm_regs.pm_cntrl.trace_mode = 0;
        pm_regs.pm_cntrl.freeze = 1;

        set_count_mode(sys->enable_kernel, sys->enable_user);

        /* Setup the thread 0 events */
        for (i = 0; i < num_ctrs; ++i) {

                pmc_cntrl[0][i].evnts = ctr[i].event;
                pmc_cntrl[0][i].masks = ctr[i].unit_mask;
                pmc_cntrl[0][i].enabled = ctr[i].enabled;
                pmc_cntrl[0][i].vcntr = i;

                for_each_possible_cpu(j)
                        per_cpu(pmc_values, j)[i] = 0;
        }

        /*
         * Setup the thread 1 events, map the thread 0 event to the
         * equivalent thread 1 event.
         */
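        /*
         * For example, thread 0 event 2100 maps to the equivalent thread 1
         * event 2119, and 2200 maps to 2216; event 2203 is left unchanged.
         */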
        for (i = 0; i < num_ctrs; ++i) {
                if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
                        pmc_cntrl[1][i].evnts = ctr[i].event + 19;
                else if (ctr[i].event == 2203)
                        pmc_cntrl[1][i].evnts = ctr[i].event;
                else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
                        pmc_cntrl[1][i].evnts = ctr[i].event + 16;
                else
                        pmc_cntrl[1][i].evnts = ctr[i].event;

                pmc_cntrl[1][i].masks = ctr[i].unit_mask;
                pmc_cntrl[1][i].enabled = ctr[i].enabled;
                pmc_cntrl[1][i].vcntr = i;
        }

        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                input_bus[i] = 0xff;

        /*
         * Our counters count up, and "count" refers to
         * how much before the next interrupt, and we interrupt
         * on overflow. So we calculate the starting value
         * which will give us "count" until overflow.
         * Then we set the events on the enabled counters.
         */
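        /*
         * For example, a requested count of 100000 (0x186A0) gives a
         * starting value of 0xFFFFFFFF - 0x186A0 = 0xFFFE795F, so the
         * counter overflows after 100000 events.
         */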
        for (i = 0; i < num_counters; ++i) {
                /* start with virtual counter set 0 */
                if (pmc_cntrl[0][i].enabled) {
                        /* Using 32bit counters, reset max - count */
                        reset_value[i] = 0xFFFFFFFF - ctr[i].count;
                        set_pm_event(i,
                                     pmc_cntrl[0][i].evnts,
                                     pmc_cntrl[0][i].masks);

                        /* global, used by cell_cpu_setup */
                        ctr_enabled |= (1 << i);
                }
        }

        /* initialize the previous counts for the virtual cntrs */
        for_each_online_cpu(cpu)
                for (i = 0; i < num_counters; ++i) {
                        per_cpu(pmc_values, cpu)[i] = reset_value[i];
                }

        return 0;
}


/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
                          struct op_system_config *sys, int num_ctrs)
{
        int ret;

        spu_cycle_reset = 0;

        /*
         * For all events except PPU CYCLES, each node will need to make
         * the rtas cbe-perftools call to set up and reset the debug bus.
         * Make the token lookup call once and store it in the global
         * variable pm_rtas_token.
         */
        pm_rtas_token = rtas_token("ibm,cbe-perftools");

        if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-perftools unknown\n",
                       __func__);
                return -EIO;
        }

        if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
                profiling_mode = SPU_PROFILING_CYCLES;
                ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
        } else {
                profiling_mode = PPU_PROFILING;
                ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
        }

        return ret;
}


18f2190d 692/* This function is called once for each cpu */
1474855d 693static int cell_cpu_setup(struct op_counter_config *cntr)
18f2190d
MJ
694{
695 u32 cpu = smp_processor_id();
696 u32 num_enabled = 0;
697 int i;
698
9b93418e
CL
699 /* Cycle based SPU profiling does not use the performance
700 * counters. The trace array is configured to collect
701 * the data.
702 */
703 if (profiling_mode == SPU_PROFILING_CYCLES)
1474855d
BN
704 return 0;
705
18f2190d
MJ
706 /* There is one performance monitor per processor chip (i.e. node),
707 * so we only need to perform this function once per node.
708 */
709 if (cbe_get_hw_thread_id(cpu))
1474855d 710 return 0;
18f2190d
MJ
711
712 /* Stop all counters */
713 cbe_disable_pm(cpu);
714 cbe_disable_pm_interrupts(cpu);
715
18f2190d
MJ
716 cbe_write_pm(cpu, pm_start_stop, 0);
717 cbe_write_pm(cpu, group_control, pm_regs.group_control);
718 cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
bcb63e25 719 write_pm_cntrl(cpu);
18f2190d
MJ
720
721 for (i = 0; i < num_counters; ++i) {
722 if (ctr_enabled & (1 << i)) {
723 pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
724 num_enabled++;
725 }
726 }
727
1474855d
BN
728 /*
729 * The pm_rtas_activate_signals will return -EIO if the FW
730 * call failed.
731 */
732 return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
733}
734
#define ENTRIES  303
#define MAXLFSR  0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence. Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *      User specified N                    Step between          Index in
 *                                       precomputed values      precomputed
 *                                                                  table
 *      0              to 2^16-1                ----                  0
 *      2^16           to 2^16+2^19-1           2^12              1 to 128
 *      2^16+2^19      to 2^16+2^19+2^22-1      2^15            129 to 256
 *      2^16+2^19+2^22 to 2^24-1                2^18            257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *      int i;
 *      unsigned int newlfsr0;
 *      unsigned int lfsr = 0xFFFFFF;
 *      unsigned int howmany = n;
 *
 *      for (i = 2; i < howmany + 2; i++) {
 *              newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *                          ((lfsr >> (size - 1 - 1)) & 1) ^
 *                          (((lfsr >> (size - 1 - 6)) & 1) ^
 *                           ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *              lfsr >>= 1;
 *              lfsr = lfsr | (newlfsr0 << (size - 1));
 *      }
 *      return lfsr;
 * }
 */

#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

static int calculate_lfsr(int n)
{
        /*
         * The ranges and steps are in powers of 2 so the calculations
         * can be done using shifts rather than divide.
         */
        int index;

        if ((n >> 16) == 0)
                index = 0;
        else if (((n - V2_16) >> 19) == 0)
                index = ((n - V2_16) >> 12) + 1;
        else if (((n - V2_16 - V2_19) >> 22) == 0)
                index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
        else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
                index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
        else
                index = ENTRIES-1;

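        /*
         * Worked example: for n = 100000, (n >> 16) != 0 but
         * ((n - V2_16) >> 19) == 0, so the second range applies and
         * index = ((100000 - 65536) >> 12) + 1 = 9.
         */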
        /* make sure index is valid */
        if ((index >= ENTRIES) || (index < 0))
                index = ENTRIES-1;

        return initial_lfsr[index];
}

static int pm_rtas_activate_spu_profiling(u32 node)
{
        int ret, i;
        struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];

        /*
         * Set up the rtas call to configure the debug bus to
         * route the SPU PCs. Setup the pm_signal for each SPU
         */
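        /*
         * For example, SPUs 0 and 1 are routed on debug bus word 0
         * (bus_word = 0x1), while SPUs 6 and 7 use word 3 (bus_word = 0x8).
         */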
        for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
                pm_signal_local[i].cpu = node;
                pm_signal_local[i].signal_group = 41;
                /* spu i on word (i/2) */
                pm_signal_local[i].bus_word = 1 << i / 2;
                /* spu i */
                pm_signal_local[i].sub_unit = i;
                pm_signal_local[i].bit = 63;
        }

        ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
                                     PASSTHRU_ENABLE, pm_signal_local,
                                     (ARRAY_SIZE(pm_signal_local)
                                      * sizeof(struct pm_signal)));

        if (unlikely(ret)) {
                printk(KERN_WARNING "%s: rtas returned: %d\n",
                       __func__, ret);
                return -EIO;
        }

        return 0;
}

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
        int ret = 0;
        struct cpufreq_freqs *frq = data;
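        /*
         * Note: the profiling frequency is updated before an increase takes
         * effect, after a decrease has completed, and on suspend/resume, so
         * the value passed to set_spu_profiling_frequency() should not
         * understate the actual clock while a transition is in progress.
         */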
        if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
            (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
                set_spu_profiling_frequency(frq->new, spu_cycle_reset);
        return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
        .notifier_call = oprof_cpufreq_notify
};
#endif

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop. Hence, we will not return an error if the FW
 * calls fail on stop. Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue. The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu_cycles(void)
{
        int subfunc, rtn_value;
        unsigned int lfsr_value;
        int cpu;

        oprofile_running = 0;

#ifdef CONFIG_CPU_FREQ
        cpufreq_unregister_notifier(&cpu_freq_notifier_block,
                                    CPUFREQ_TRANSITION_NOTIFIER);
#endif

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                subfunc = 3;    /*
                                 * 2 - activate SPU tracing,
                                 * 3 - deactivate
                                 */
                lfsr_value = 0x8f100000;

                rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
                                      subfunc, cbe_cpu_to_node(cpu),
                                      lfsr_value);

                if (unlikely(rtn_value != 0)) {
                        printk(KERN_ERR
                               "%s: rtas call ibm,cbe-spu-perftools "
                               "failed, return = %d\n",
                               __func__, rtn_value);
                }

                /* Deactivate the signals */
                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
        }

        if (profiling_mode == SPU_PROFILING_CYCLES)
                stop_spu_profiling_cycles();
}

static void cell_global_stop_ppu(void)
{
        int cpu;

        /*
         * This routine will be called once for the system.
         * There is one performance monitor per node, so we
         * only need to perform this function once per node.
         */
        del_timer_sync(&timer_virt_cntr);
        oprofile_running = 0;
        smp_wmb();

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                cbe_sync_irq(cbe_cpu_to_node(cpu));
                /* Stop the counters */
                cbe_disable_pm(cpu);

                /* Deactivate the signals */
                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

                /* Deactivate interrupts */
                cbe_disable_pm_interrupts(cpu);
        }
}

static void cell_global_stop(void)
{
        if (profiling_mode == PPU_PROFILING)
                cell_global_stop_ppu();
        else
                cell_global_stop_spu_cycles();
}

static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
        int subfunc;
        unsigned int lfsr_value;
        int cpu;
        int ret;
        int rtas_error;
        unsigned int cpu_khzfreq = 0;

        /* The SPU profiling uses time-based profiling based on
         * cpu frequency, so if configured with the CPU_FREQ
         * option, we should detect frequency changes and react
         * accordingly.
         */
#ifdef CONFIG_CPU_FREQ
        ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
        if (ret < 0)
                /* this is not a fatal error */
                printk(KERN_ERR "CPU freq change registration failed: %d\n",
                       ret);
        else
                cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

        set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                /*
                 * Setup SPU cycle-based profiling.
                 * Set perf_mon_control bit 0 to a zero before
                 * enabling spu collection hardware.
                 */
                cbe_write_pm(cpu, pm_control, 0);

                if (spu_cycle_reset > MAX_SPU_COUNT)
                        /* use largest possible value */
                        lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
                else
                        lfsr_value = calculate_lfsr(spu_cycle_reset);

                /* must use a non zero value. Zero disables data collection. */
                if (lfsr_value == 0)
                        lfsr_value = calculate_lfsr(1);

                lfsr_value = lfsr_value << 8; /* shift lfsr to correct
                                               * register location
                                               */

                /* debug bus setup */
                ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

                if (unlikely(ret)) {
                        rtas_error = ret;
                        goto out;
                }

                subfunc = 2;    /* 2 - activate SPU tracing, 3 - deactivate */

                /* start profiling */
                ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
                                cbe_cpu_to_node(cpu), lfsr_value);

                if (unlikely(ret != 0)) {
                        printk(KERN_ERR
                               "%s: rtas call ibm,cbe-spu-perftools failed, "
                               "return = %d\n", __func__, ret);
                        rtas_error = -EIO;
                        goto out;
                }
        }

        rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
        if (rtas_error)
                goto out_stop;

        oprofile_running = 1;
        return 0;

out_stop:
        cell_global_stop_spu_cycles();  /* clean up the PMU/debug bus */
out:
        return rtas_error;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
        u32 cpu, i;
        u32 interrupt_mask = 0;

        /* This routine gets called once for the system.
         * There is one performance monitor per node, so we
         * only need to perform this function once per node.
         */
        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                interrupt_mask = 0;

                for (i = 0; i < num_counters; ++i) {
                        if (ctr_enabled & (1 << i)) {
                                cbe_write_ctr(cpu, i, reset_value[i]);
                                enable_ctr(cpu, i, pm_regs.pm07_cntrl);
                                interrupt_mask |=
                                        CBE_PM_CTR_OVERFLOW_INTR(i);
                        } else {
                                /* Disable counter */
                                cbe_write_pm07_control(cpu, i, 0);
                        }
                }

                cbe_get_and_clear_pm_interrupts(cpu);
                cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
                cbe_enable_pm(cpu);
        }

        virt_cntr_inter_mask = interrupt_mask;
        oprofile_running = 1;
        smp_wmb();

        /*
         * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
         * executed which manipulates the PMU. We start the "virtual counter"
         * here so that we do not need to synchronize access to the PMU in
         * the above for-loop.
         */
        start_virt_cntrs();

        return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
        if (profiling_mode == SPU_PROFILING_CYCLES)
                return cell_global_start_spu_cycles(ctr);
        else
                return cell_global_start_ppu(ctr);
}

static void cell_handle_interrupt_ppu(struct pt_regs *regs,
                                      struct op_counter_config *ctr)
{
        u32 cpu;
        u64 pc;
        int is_kernel;
        unsigned long flags = 0;
        u32 interrupt_mask;
        int i;

        cpu = smp_processor_id();

        /*
         * Need to make sure the interrupt handler and the virt counter
         * routine are not running at the same time. See the
         * cell_virtual_cntr() routine for additional comments.
         */
        spin_lock_irqsave(&cntr_lock, flags);

        /*
         * Need to disable and reenable the performance counters
         * to get the desired behavior from the hardware. This
         * is hardware specific.
         */
        cbe_disable_pm(cpu);

        interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

        /*
         * If the interrupt mask has been cleared, then the virt cntr
         * has cleared the interrupt. When the thread that generated
         * the interrupt is restored, the data count will be restored to
         * 0xfffffff0 to cause the interrupt to be regenerated.
         */
        if ((oprofile_running == 1) && (interrupt_mask != 0)) {
                pc = regs->nip;
                is_kernel = is_kernel_addr(pc);

                for (i = 0; i < num_counters; ++i) {
                        if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
                            && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                cbe_write_ctr(cpu, i, reset_value[i]);
                        }
                }

                /*
                 * The counters were frozen by the interrupt.
                 * Reenable the interrupt and restart the counters.
                 * If there was a race between the interrupt handler and
                 * the virtual counter routine, the virtual counter
                 * routine may have cleared the interrupts. Hence we must
                 * use the virt_cntr_inter_mask to re-enable the interrupts.
                 */
                cbe_enable_pm_interrupts(cpu, hdw_thread,
                                         virt_cntr_inter_mask);

                /*
                 * The writes to the various performance counters only write
                 * to a latch. The new values (interrupt setting bits, reset
                 * counter value etc.) are not copied to the actual registers
                 * until the performance monitor is enabled. In order to get
                 * this to work as desired, the performance monitor needs to
                 * be disabled while writing to the latches. This is a
                 * HW design issue.
                 */
                cbe_enable_pm(cpu);
        }
        spin_unlock_irqrestore(&cntr_lock, flags);
}

static void cell_handle_interrupt(struct pt_regs *regs,
                                  struct op_counter_config *ctr)
{
        if (profiling_mode == PPU_PROFILING)
                cell_handle_interrupt_ppu(regs, ctr);
}

/*
 * This function is called from the generic OProfile
 * driver. When profiling PPUs, we need to do the
 * generic sync start; otherwise, do spu_sync_start.
 */
static int cell_sync_start(void)
{
        if ((profiling_mode == SPU_PROFILING_CYCLES) ||
            (profiling_mode == SPU_PROFILING_EVENTS))
                return spu_sync_start();
        else
                return DO_GENERIC_SYNC;
}

static int cell_sync_stop(void)
{
        if ((profiling_mode == SPU_PROFILING_CYCLES) ||
            (profiling_mode == SPU_PROFILING_EVENTS))
                return spu_sync_stop();
        else
                return 1;
}

struct op_powerpc_model op_model_cell = {
        .reg_setup = cell_reg_setup,
        .cpu_setup = cell_cpu_setup,
        .global_start = cell_global_start,
        .global_stop = cell_global_stop,
        .sync_start = cell_sync_start,
        .sync_stop = cell_sync_stop,
        .handle_interrupt = cell_handle_interrupt,
};