/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;
#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpus_weight(cpu_possible_map);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
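/*
 * For orientation only: the secondary side of this handshake lives in
 * the early assembly bring-up code, which spins until cpu_start goes
 * non-zero.  In rough, hypothetical C pseudo-form (not code from this
 * file):
 *
 *	while (!paca[nr].cpu_start)
 *		barrier();
 *	... continue on to secondary_start ...
 */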
void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;
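/*
 * Handshake between the initiator and the receivers, as implemented by
 * smp_call_function() and smp_call_function_interrupt() below:
 *
 *	initiator				each receiver
 *	---------				-------------
 *	fill in *call_data
 *	send PPC_MSG_CALL_FUNCTION IPI  --->	copy func/info/wait
 *						atomic_inc(started)
 *	spin until started == cpus		(*func)(info)
 *	if (wait) spin until			if (wait)
 *	    finished == cpus			    atomic_inc(finished)
 */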
/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT	8
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 * (A usage sketch follows EXPORT_SYMBOL(smp_call_function) below.)
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	u64 timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can be changed. */
	cpus = num_online_cpus() - 1;
	if (cpus == 0) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for response */
	while (atomic_read(&data.started) != cpus) {
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			goto out;
		}
	}

	if (wait) {
		while (atomic_read(&data.finished) != cpus) {
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				goto out;
			}
		}
	}

	ret = 0;
 out:
	call_data = NULL;
	spin_unlock(&call_lock);
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
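/*
 * Usage sketch (hypothetical caller; 'drain_local_queue' is illustrative
 * and not defined in this file): run a fast, non-blocking function on
 * every other online CPU and wait for all of them to finish.
 *
 *	static void drain_local_queue(void *unused)
 *	{
 *		... per-cpu work; must not block or re-enter
 *		    smp_call_function() ...
 *	}
 *
 *	if (smp_call_function(drain_local_queue, NULL, 0, 1) != 0)
 *		printk("some cpus did not respond\n");
 */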
void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);
static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	paca[cpu].__current = p;
	current_set[cpu] = p->thread_info;
	p->thread_info->cpu = cpu;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	max_cpus = smp_ops->probe();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);

	paca[boot_cpuid].__current = current;
	current_set[boot_cpuid] = current->thread_info;
}
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
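/*
 * The cpu_state handshake, as used by the generic_* helpers below: the
 * dying CPU marks itself CPU_DEAD in generic_mach_cpu_die() and then
 * spins until generic_cpu_enable() moves it back to CPU_UP_PREPARE,
 * while generic_cpu_die() polls for the CPU_DEAD mark from a surviving
 * CPU's side.
 */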
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	cpu_clear(cpu, cpu_online_map);
	vdso_data->processorCount--;
	fixup_irqs(cpu_online_map);
	return 0;
}
int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
	return 0;
}
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();

	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}
int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
		return -EINVAL;

	paca[cpu].default_decr = tb_ticks_per_jiffy;

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU that was previously hotplugged).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--)
			msleep(200);
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
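/*
 * Bring-up handshake, as implemented by __cpu_up() above and
 * start_secondary() below: __cpu_up() zeroes cpu_callin_map[cpu],
 * orders the store with smp_mb(), then kicks the CPU; the secondary
 * sets cpu_callin_map[cpu] and finally marks itself in cpu_online_map,
 * which is what __cpu_up() polls for before returning.
 */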
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	if (smp_ops->cpu_disable)
		return smp_ops->cpu_disable();

	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif