/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);
static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
	wait_queue_head_t writer_queue;
} cpu_hotplug;

#define writer_exists() (cpu_hotplug.active_writer != NULL)
void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
	init_waitqueue_head(&cpu_hotplug.writer_queue);
}
#ifdef CONFIG_HOTPLUG_CPU

void lock_cpu_hotplug(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
void unlock_cpu_hotplug(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount--;

	if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
		wake_up(&cpu_hotplug.writer_queue);

	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
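
/*
 * Illustrative reader-side usage (a sketch, not code from this file;
 * do_init_cpu() is a made-up helper): a subsystem that needs
 * cpu_online_map to stay stable while it iterates brackets the walk
 * with the refcounting lock above:
 *
 *	lock_cpu_hotplug();
 *	for_each_online_cpu(cpu)
 *		do_init_cpu(cpu);
 *	unlock_cpu_hotplug();
 *
 * Readers may nest (the refcount counts them), and a writer in
 * cpu_hotplug_begin() below waits until the refcount drops to zero.
 */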
#endif	/* CONFIG_HOTPLUG_CPU */
/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_map, cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
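
/*
 * Sketch of the intended calling convention (illustrative only): any
 * writer that updates the maps, e.g. a hypothetical hot-add path
 * marking a new cpu present, wraps the update in this pair:
 *
 *	cpu_maps_update_begin();
 *	cpu_set(cpu, cpu_present_map);
 *	cpu_maps_update_done();
 *
 * cpu_up()/cpu_down() below take cpu_add_remove_lock through the same
 * pair, so map updates serialize against hotplug operations.
 */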
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * lock_cpu_hotplug() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&cpu_hotplug.lock);

	cpu_hotplug.active_writer = current;
	add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
	while (cpu_hotplug.refcount) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
		mutex_lock(&cpu_hotplug.lock);
	}
	remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
}
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
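
/*
 * The two helpers above always bracket the actual hotplug work and are
 * themselves nested inside the cpu_add_remove_lock pair; schematically
 * (illustrative ordering only, see _cpu_down()/_cpu_up() below):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();	(wait for readers to drain)
 *	...take the CPU up or down, run the notifier chain...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */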
/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);
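
/*
 * A minimal notifier sketch (illustrative; foo_cpu_callback() and the
 * foo_* helpers are made up): subsystems watch CPU state transitions
 * by registering on cpu_chain via the API above.
 *
 *	static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *		case CPU_UP_PREPARE_FROZEN:
 *			if (foo_prepare_cpu(cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block __cpuinitdata foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 *
 * Returning NOTIFY_BAD from a *_PREPARE action vetoes the transition
 * and triggers the CPU_UP_CANCELED/CPU_DOWN_FAILED unwind below.
 */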
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);
	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		err = -EINVAL;
		goto out_release;
	}
	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}
	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out_release:
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
	cpu_hotplug_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_down(cpu, 0);

	cpu_maps_update_done();
	return err;
}
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
							-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
	cpu_hotplug_done();

	return ret;
}
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_isset(cpu, cpu_possible_map)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_up(cpu, 0);

	cpu_maps_update_done();
	return err;
}
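
/*
 * Usage note (a sketch, not part of this file): cpu_up() and
 * cpu_down() are the entry points behind the sysfs online attribute,
 * roughly:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	-> cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	-> cpu_up(1)
 *
 * Both return -EBUSY while cpu_hotplug_disabled is set, e.g. between
 * disable_nonboot_cpus() and enable_nonboot_cpus() below.
 */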
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
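
/*
 * Illustrative call site (a simplified sketch of a suspend sequence,
 * error handling elided):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		...enter the sleep state on the sole remaining CPU...
 *		enable_nonboot_cpus();
 *	}
 *
 * Note that _cpu_down(cpu, 1)/_cpu_up(cpu, 1) pass tasks_frozen = 1,
 * so notifiers see the CPU_*_FROZEN actions and can skip work that is
 * unnecessary while userspace is frozen.
 */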
#endif /* CONFIG_PM_SLEEP_SMP */