/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
        int nr_cpus;

        DBG("smp_mpic_probe()...\n");

        nr_cpus = cpus_weight(cpu_possible_map);

        DBG("nr_cpus: %d\n", nr_cpus);

        if (nr_cpus > 1)
                mpic_request_ipis();

        return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
        mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */

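/*
 * Generic 64-bit "kick": release a secondary that is spinning on its
 * paca cpu_start flag so it can proceed to secondary_start.
 */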
#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

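/*
 * Demultiplex an incoming IPI message: call-function, reschedule and
 * (optionally) debugger-break each get their own handler.
 */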
void smp_message_recv(int msg, struct pt_regs *regs)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
                debugger_ipi(regs);
                break;
#endif
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

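/*
 * smp_send_stop() parks every other CPU in an interrupts-off busy
 * loop; used on shutdown/panic-style paths.
 */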
static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT        8

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run.  This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until the function has completed on
 *   the other CPUs.
 * [RETURNS] 0 on success, else a negative status code.  Does not return
 *   until the remote CPUs are nearly ready to execute <<func>> or have
 *   already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus;
        u64 timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        smp_wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

        /* Wait for response */
        while (atomic_read(&data.started) != cpus) {
                HMT_low();
                if (get_tb() >= timeout) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        if (wait) {
                while (atomic_read(&data.finished) != cpus) {
                        HMT_low();
                        if (get_tb() >= timeout) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

        ret = 0;

 out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

EXPORT_SYMBOL(smp_call_function);

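/*
 * IPI handler for PPC_MSG_CALL_FUNCTION: pick up the function and
 * argument published in call_data, bump ->started to tell the sender
 * we have a private copy, run the function, and bump ->finished if
 * the sender asked to wait for completion.
 */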
void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify the initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

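/*
 * Pre-fork the idle task for a secondary CPU and record its
 * thread_info so the secondary can adopt it as its initial current
 * when it starts up.
 */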
static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
#endif
        current_set[cpu] = p->thread_info;
        p->thread_info->cpu = cpu;
}

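/*
 * Called on the boot CPU before any secondaries are started: record
 * the boot CPU's info, let the platform probe how many CPUs to bring
 * up, stagger the decrementer ticks, and create an idle task for each
 * possible secondary.
 */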
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu.  We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        max_cpus = smp_ops->probe();

        smp_space_timers(max_cpus);

        for_each_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = current->thread_info;
}

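/*
 * Generic CPU hotplug helpers: the offline path removes the CPU from
 * cpu_online_map and reroutes its interrupts, the dying CPU parks
 * itself in CPU_DEAD, and the requesting CPU polls cpu_state until
 * the victim reports dead (or gives up after a while).
 */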
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        systemcfg->processorCount--;
        cpu_clear(cpu, cpu_online_map);
        fixup_irqs(cpu_online_map);
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();

        flush_tlb_pending();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

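/*
 * Bring up one secondary: hand it its idle thread, kick it out of its
 * holding loop, wait for it to check in via cpu_callin_map, optionally
 * transfer the timebase, then wait for it to mark itself online.
 */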
int __devinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
                return -EINVAL;

#ifdef CONFIG_PPC64
        paca[cpu].default_decr = tb_ticks_per_jiffy;
#endif

        /* Make sure the callin-map entry is 0 (can be left over from a
         * previous CPU hotplug)
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        msleep(200);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until the cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}


/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        if (smp_ops->cpu_disable)
                return smp_ops->cpu_disable();

        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif