/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
};
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}
/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}
/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(p);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}
/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
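/*
 * Example usage (illustrative sketch, not part of the original file): run a
 * callback on one CPU while that CPU is monopolized.  The callback and the
 * per-cpu variable below are hypothetical.
 *
 *	static DEFINE_PER_CPU(u64, demo_counter);	// hypothetical
 *
 *	static int read_demo_counter(void *arg)
 *	{
 *		*(u64 *)arg = __this_cpu_read(demo_counter);
 *		return 0;
 *	}
 *
 *	u64 snapshot;
 *	int err = stop_one_cpu(2, read_demo_counter, &snapshot);
 *	// err is -ENOENT if cpu 2 was offline, otherwise the callback's
 *	// return value.
 */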
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};
struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};
static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}
/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
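/*
 * Illustration (not from the original source): with num_threads == 3 the
 * shared state only advances when the last thread calls ack_state(), so
 * all stoppers walk PREPARE -> DISABLE_IRQ -> RUN -> EXIT in lockstep:
 *
 *	CPU0		CPU1		CPU2
 *	ack PREPARE	ack PREPARE	ack PREPARE	<- last ack bumps state
 *	irq off, ack	irq off, ack	irq off, ack
 *	ack RUN		fn(data), ack	ack RUN		<- only active_cpus run @fn
 *	ack EXIT	ack EXIT	ack EXIT	<- loop ends, irqs restored
 */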
struct irq_cpu_stop_queue_work_info {
	int			cpu1;
	int			cpu2;
	struct cpu_stop_work	*work1;
	struct cpu_stop_work	*work2;
};
/*
 * This function is always run with irqs and preemption disabled.
 * This guarantees that both work1 and work2 get queued, before
 * our local migrate thread gets the chance to preempt us.
 */
static void irq_cpu_stop_queue_work(void *arg)
{
	struct irq_cpu_stop_queue_work_info *info = arg;

	cpu_stop_queue_work(info->cpu1, info->work1);
	cpu_stop_queue_work(info->cpu2, info->work2);
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	int call_cpu;
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct irq_cpu_stop_queue_work_info call_args;
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	call_args = (struct irq_cpu_stop_queue_work_info){
		.cpu1 = cpu1,
		.cpu2 = cpu2,
		.work1 = &work1,
		.work2 = &work2,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * Queuing needs to be done by the lowest numbered CPU, to ensure
	 * that works are always queued in the same order on every CPU.
	 * This prevents deadlocks.
	 */
	call_cpu = min(cpu1, cpu2);

	smp_call_function_single(call_cpu, &irq_cpu_stop_queue_work,
				 &call_args, 1);

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}
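/*
 * Example usage (illustrative sketch, not part of the original file): the
 * scheduler's migrate_swap() is the kind of caller this was built for.  A
 * hypothetical callback swapping state between two stopped CPUs might look
 * like:
 *
 *	static int swap_demo(void *arg)		// hypothetical; runs on one
 *	{					// CPU while both are stopped
 *		struct demo_pair *p = arg;	// hypothetical type
 *
 *		swap(*p->a, *p->b);
 *		return 0;
 *	}
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_demo, &pair);
 */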
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
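/*
 * Example usage (illustrative, not from the original file): because the
 * caller must keep @work_buf alive until the stopper runs, callers embed it
 * in a longer-lived structure.  The scheduler's active load balancing does
 * roughly this (names as in kernel/sched/fair.c of this era):
 *
 *	stop_one_cpu_nowait(cpu_of(busiest),
 *			    active_load_balance_cpu_stop, busiest,
 *			    &busiest->active_balance_work);
 */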
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
	preempt_enable();
}
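/*
 * Deadlock illustration for the preempt_disable() above (not from the
 * original source): queueing the first CPU's work wakes that CPU's stopper;
 * if it preempted us at that point and @fn were multi_cpu_stop(), the
 * stopper would spin waiting for every other CPU to ack MULTI_STOP_PREPARE,
 * while the remaining works never get queued because we never run again.
 */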
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
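/*
 * Example usage (illustrative sketch, not part of the original file):
 * monopolize a specific set of CPUs with a hypothetical callback:
 *
 *	static int drain_demo(void *unused)	// hypothetical; runs
 *	{					// concurrently on each
 *		return 0;			// cpu in the mask
 *	}
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_copy(mask, cpu_online_mask);
 *		cpumask_clear_cpu(0, mask);	// all but cpu 0
 *		ret = stop_cpus(mask, drain_demo, NULL);
 *		free_cpumask_var(mask);
 *	}
 */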
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
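/*
 * Example usage (illustrative, not from the original file): a caller that
 * must not block behind another stop request can poll instead; expedited
 * RCU grace periods historically used this pattern.  demo_fn is
 * hypothetical.
 *
 *	while ((ret = try_stop_cpus(cpu_online_mask, demo_fn, NULL)) == -EAGAIN)
 *		cpu_relax();	// or back off / give up
 */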
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}
static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}
extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}
static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}
static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}
static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper_task,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
};
static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);
#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
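/*
 * Example usage (illustrative sketch, not part of the original file):
 * text-patching style callers rely on every other CPU spinning with irqs
 * disabled while @fn runs.  The callback below is hypothetical.
 *
 *	static int apply_patch(void *arg)	// hypothetical
 *	{
 *		// with cpus == NULL, runs on the first online CPU only;
 *		// all other online CPUs spin with interrupts off
 *		return 0;
 *	}
 *
 *	ret = stop_machine(apply_patch, NULL, NULL);
 */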
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */