/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
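
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * per-cpu hardware register from another cpu. The callback runs in the
 * stopper thread on the target cpu; every name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_read_fn(void *arg)
{
	u64 *val = arg;

	/* runs on the target cpu with it monopolized; must not sleep */
	*val = example_read_hw_register();
	return 0;
}

static int example_read_on(unsigned int cpu, u64 *val)
{
	/* sleeps until the stopper on @cpu has run the callback */
	return stop_one_cpu(cpu, example_read_fn, val);
}
#endif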

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};
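
/*
 * Every participating stopper thread walks the states above in order:
 *
 *	NONE -> PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * set_state() publishes the next state and ack_state() counts threads in;
 * the last thread to ack a state advances the machine, so all CPUs move
 * in lockstep and @fn runs only after every CPU has disabled interrupts.
 */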

struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
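
/*
 * Note on ordering: the smp_wmb() in set_state() publishes the refreshed
 * thread_ack counter before the new state, so a thread that spots the
 * state change in multi_cpu_stop() decrements a counter that has already
 * been reset for that state.
 */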

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them (@cpu1).
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first. Or it's not either one of our CPUs that's getting unplugged,
	 * in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	cpu_stop_queue_work(cpu1, &work1);
	cpu_stop_queue_work(cpu2, &work2);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}
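
/*
 * Usage sketch (illustrative, not part of the original file): atomically
 * exchanging per-cpu state between two cpus. While @fn runs on @cpu1,
 * the other cpu spins in multi_cpu_stop() with interrupts disabled, so
 * neither side can observe the exchange half-done. All names below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_swap_arg {
	unsigned int src_cpu, dst_cpu;
};

static int example_swap_fn(void *data)
{
	struct example_swap_arg *arg = data;

	return example_exchange_state(arg->src_cpu, arg->dst_cpu);
}

static int example_swap(unsigned int src, unsigned int dst)
{
	struct example_swap_arg arg = { .src_cpu = src, .dst_cpu = dst };

	return stop_two_cpus(src, dst, example_swap_fn, &arg);
}
#endif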

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
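
/*
 * Usage sketch (illustrative, not part of the original file): kicking a
 * cpu from a context that may not sleep. The per-cpu work buffer must
 * remain untouched until the callback has started; all names below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static DEFINE_PER_CPU(struct cpu_stop_work, example_kick_work);

static int example_kick_fn(void *arg)
{
	/* runs later in the stopper thread of the target cpu */
	return 0;
}

static void example_kick(unsigned int cpu)
{
	/* queues and returns immediately; safe from atomic context */
	stop_one_cpu_nowait(cpu, example_kick_fn, NULL,
			    &per_cpu(example_kick_work, cpu));
}
#endif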

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		cpu_stop_queue_work(cpu, work);
	}
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non-zero return value if any returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
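
/*
 * Usage sketch (illustrative, not part of the original file): because
 * stop_cpus() calls are serialized, @fn may safely wait for every target
 * cpu to enter it, as in this hypothetical rendezvous:
 */
#if 0	/* example only, not compiled */
static atomic_t example_entered;

static int example_rendezvous_fn(void *arg)
{
	atomic_inc(&example_entered);
	/* spin until every online cpu has entered the callback */
	while (atomic_read(&example_entered) < num_online_cpus())
		cpu_relax();
	return 0;
}

static int example_rendezvous(void)
{
	atomic_set(&example_entered, 0);
	return stop_cpus(cpu_online_mask, example_rendezvous_fn, NULL);
}
#endif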

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any
 * non-zero return value if any returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper threads have
		 * been initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
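
/*
 * Usage sketch (illustrative, not part of the original file): updating
 * state that no cpu may observe half-written. While @fn runs, all other
 * online cpus spin with interrupts disabled. All names below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_patch_fn(void *arg)
{
	/* the whole machine is stopped; mutate shared state safely */
	example_apply_patch(arg);
	return 0;
}

static int example_patch(void *patch)
{
	/* NULL cpumask: @fn runs on the first online cpu */
	return stop_machine(example_patch_fn, patch, NULL);
}
#endif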

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the middle of hotplug (so no other
 * CPU hotplug operation can start), is not marked active, and doesn't
 * have enough context to sleep.
 *
 * This function provides stop_machine() functionality for such a state
 * by busy-waiting for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non-zero return value if any
 * returned non-zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
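
/*
 * Usage note (not from this file): the expected caller of
 * stop_machine_from_inactive_cpu() is cpu-bringup code that must
 * rendezvous with the already-active cpus before the incoming cpu is
 * usable, e.g. x86 MTRR state synchronization; exact call sites vary
 * by kernel version.
 */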

#endif	/* CONFIG_STOP_MACHINE */