/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  A pretty lame locking primitive
 * compared to the Alpha and Intel ones, no?  Most Sparcs have the
 * 'swap' instruction, which is much better...
 */

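/* Illustrative sketch only (nothing in this file uses it): a minimal
 * test-and-set lock built on 'ldstub'.  The example_* names are
 * hypothetical; the real sparc32 spinlock lives in asm/spinlock_32.h.
 * Spin until the byte reads back as zero, i.e. until we are the CPU
 * whose ldstub stored the 0xff.
 */
static inline void example_ldstub_lock(volatile unsigned char *byte)
{
	unsigned char val;

	do {
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=r" (val)
				     : "r" (byte)
				     : "memory");
	} while (val != 0);
}

static inline void example_ldstub_unlock(volatile unsigned char *byte)
{
	/* A plain zero byte store releases the lock. */
	__asm__ __volatile__("stb	%%g0, [%0]" : : "r" (byte) : "memory");
}
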
void smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						"clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

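	/* udelay_val holds each cpu's loops_per_jiffy, so dividing the sum
	 * by 500000/HZ gives whole BogoMIPS and the 5000/HZ division below
	 * yields the two fractional digits.
	 */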
87 printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
88 num, bogosum/(500000/HZ),
89 (bogosum/(5000/HZ))%100);
90
8b3c848c 91 switch(sparc_cpu_model) {
8b3c848c
RB
92 case sun4m:
93 smp4m_smp_done();
94 break;
95 case sun4d:
96 smp4d_smp_done();
97 break;
8401707f
KE
98 case sparc_leon:
99 leon_smp_done();
100 break;
8b3c848c
RB
101 case sun4e:
102 printk("SUN4E\n");
103 BUG();
104 break;
105 case sun4u:
106 printk("SUN4U\n");
107 BUG();
108 break;
109 default:
110 printk("UNKNOWN!\n");
111 BUG();
112 break;
6cb79b3f 113 }
1da177e4
LT
114}
115
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}

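/* Illustrative sketch only: the per-model SMP code (sun4m, sun4d or leon)
 * is expected to install its own IPI backend behind sparc32_ipi_ops,
 * roughly along these lines (the example_* names are hypothetical):
 *
 *	static const struct sparc32_ipi_ops example_ipi_ops = {
 *		.resched	= example_ipi_resched,
 *		.single		= example_ipi_single,
 *		.mask_one	= example_ipi_mask_one,
 *	};
 *
 *	sparc32_ipi_ops = &example_ipi_ops;
 *
 * The IPI senders in this file then simply dispatch through the pointer.
 */
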
void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger a single-call IPI on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger an IPI mask call on each CPU in the mask */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}

void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

static void arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

static void arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

static void sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile on some architectures, so run
	 * the cpu initialization code before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);

	/* We should never reach here! */
	BUG();
}

void smp_callin(void)
{
	sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}