arch/powerpc/platforms/cell/pervasive.c

/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/reg.h>

#include "pervasive.h"

static DEFINE_SPINLOCK(cbe_pervasive_lock);
struct cbe_pervasive {
	struct pmd_regs __iomem *regs;
	unsigned int thread;
};

/* can't use per_cpu from setup_arch */
static struct cbe_pervasive cbe_pervasive[NR_CPUS];

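/*
 * Enable Pause(0) power management for the calling hardware thread.
 * Called from the idle loop once the thread's pervasive (PMD) register
 * block has been mapped by cbe_find_pmd_mmio(): it sets the pause-zero
 * control bit in pm_control and programs the thread switch control
 * register so decrementer and external interrupts can still wake the
 * thread after cbe_idle() disables it.
 */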
static void __init cbe_enable_pause_zero(void)
{
	unsigned long thread_switch_control;
	unsigned long temp_register;
	struct cbe_pervasive *p;
	int thread;

	spin_lock_irq(&cbe_pervasive_lock);
	p = &cbe_pervasive[smp_processor_id()];

	if (!cbe_pervasive->regs)
		goto out;

	pr_debug("Power Management: CPU %d\n", smp_processor_id());

	/* Enable Pause(0) control bit */
	temp_register = in_be64(&p->regs->pm_control);

	out_be64(&p->regs->pm_control,
		 temp_register | PMD_PAUSE_ZERO_CONTROL);

	/* Enable DEC and EE interrupt request */
	thread_switch_control = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (mfspr(SPRN_CTRLF) & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		thread = 0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		thread = 1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
		       __FUNCTION__);
		thread = -1;
		break;
	}

	if (p->thread != thread)
		printk(KERN_WARNING "%s: device tree inconsistent, "
		       "cpu %i: %d/%d\n", __FUNCTION__,
		       smp_processor_id(),
		       p->thread, thread);

	mtspr(SPRN_TSC_CELL, thread_switch_control);

out:
	spin_unlock_irq(&cbe_pervasive_lock);
}

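/*
 * Cell-specific idle loop: when nothing is runnable, drop to low thread
 * priority and clear the runlatch and thread-enable bits so the hardware
 * thread stops.  A decrementer or external interrupt wakes it back up
 * through cbe_system_reset_exception().
 */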
static void cbe_idle(void)
{
	unsigned long ctrl;

	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

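/*
 * System reset handler for wakeups from the disabled-thread state entered
 * in cbe_idle().  Decodes the wake reason from SRR1 (saved in regs->msr)
 * and dispatches to the IRQ or timer code; returns 0 for anything
 * unrecognised so the generic system reset path still runs.
 */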
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEEE:
		do_IRQ(regs);
		break;
	case SRR1_WAKEDEC:
		timer_interrupt(regs);
		break;
	case SRR1_WAKEMT:
		/* no action required */
		break;
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}

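/*
 * Locate the pervasive (PMD) register area for a given CPU: walk the
 * device tree "cpu" nodes, match the CPU's hardware id against the
 * ibm,ppc-interrupt-server#s property, then map the address/size pair
 * from that node's "pervasive" property and record which hardware
 * thread the CPU is.
 */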
static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
{
	struct device_node *node;
	unsigned int *int_servers;
	char *addr;
	unsigned long real_address;
	unsigned int size;

	struct pmd_regs __iomem *pmd_mmio_area;
	int hardid, thread;
	int proplen;

	pmd_mmio_area = NULL;
	hardid = get_hard_smp_processor_id(cpu);
	for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
		int_servers = (void *) get_property(node,
				"ibm,ppc-interrupt-server#s", &proplen);
		if (!int_servers) {
			printk(KERN_WARNING "%s misses "
			       "ibm,ppc-interrupt-server#s property\n",
			       node->full_name);
			continue;
		}
		for (thread = 0; thread < proplen / sizeof (int); thread++) {
			if (hardid == int_servers[thread]) {
				addr = get_property(node, "pervasive", NULL);
				goto found;
			}
		}
	}

	printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
	return -EINVAL;

found:
	real_address = *(unsigned long*) addr;
	addr += sizeof (unsigned long);
	size = *(unsigned int*) addr;

	pr_debug("pervasive area for CPU %d at %lx, size %x\n",
		 cpu, real_address, size);
	p->regs = ioremap(real_address, size);
	p->thread = thread;
	return 0;
}

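/*
 * Set up Pause(0) support: on CPUs with CPU_FTR_PAUSE_ZERO, map the PMD
 * registers for every possible CPU and install the Cell-specific idle
 * loop and system reset handler.  Bails out without installing anything
 * if any CPU's register area cannot be found.
 */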
void __init cell_pervasive_init(void)
{
	struct cbe_pervasive *p;
	int cpu;
	int ret;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		p = &cbe_pervasive[cpu];
		ret = cbe_find_pmd_mmio(cpu, p);
		if (ret)
			return;
	}

	ppc_md.idle_loop = cbe_idle;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}