// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};
/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

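/*
 * A minimal sketch of the @done lifecycle, assuming two works share one
 * descriptor: cpu_stop_init_done() arms nr_todo, every completing work
 * calls cpu_stop_signal_done(), and only the final decrement (nr_todo
 * reaching 0) fires the completion that the waiter sleeps on:
 *
 *	struct cpu_stop_done done;
 *
 *	cpu_stop_init_done(&done, 2);
 *	cpu_stop_signal_done(&done);		// nr_todo: 2 -> 1
 *	cpu_stop_signal_done(&done);		// nr_todo: 1 -> 0, complete()
 *	wait_for_completion(&done.completion);	// returns right away here
 */
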
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

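/*
 * A minimal usage sketch for stop_one_cpu(), assuming a hypothetical
 * callback drain_remote_state() that must run with the target cpu
 * monopolized.  The callback executes in the stopper thread with the
 * preempt count raised, so it must not sleep:
 *
 *	static int drain_remote_state(void *arg)
 *	{
 *		struct my_state *st = arg;	// hypothetical type
 *
 *		...				// non-sleeping work only
 *		return 0;
 *	}
 *
 *	err = stop_one_cpu(cpu, drain_remote_state, st);
 *	if (err == -ENOENT)
 *		...				// @cpu was offline
 */
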
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

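/*
 * A short walk-through of the state machine, assuming num_threads == 2
 * (cpus A and B): the initiator calls set_state(PREPARE), which arms
 * thread_ack = 2.  Each cpu notices the new state in multi_cpu_stop()
 * and calls ack_state(); whichever cpu acks last drops thread_ack to 0
 * and advances the machine, so all cpus step through the states in
 * lock-step:
 *
 *	MULTI_STOP_PREPARE	-> everyone is spinning in the loop
 *	MULTI_STOP_DISABLE_IRQ	-> everyone runs local_irq_disable()
 *	MULTI_STOP_RUN		-> only active cpus call msdata->fn()
 *	MULTI_STOP_EXIT		-> the loop ends, irq flags are restored
 */
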
struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  This will cause us to not wake up the
	 * other stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * Because we hold both locks, we can't miss stop_cpus_in_progress
	 * when queue_stop_cpus_work() has queued a work on cpu1 but not yet
	 * on cpu2.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both specified cpus and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

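/*
 * A minimal usage sketch: in this kernel the scheduler's migrate_swap()
 * is the main caller, pinning two runqueues so a pair of tasks can be
 * exchanged atomically.  A hypothetical caller looks like:
 *
 *	static int swap_buffers(void *arg)	// hypothetical callback
 *	{
 *		...				// both cpus are stopped here
 *		return 0;
 *	}
 *
 *	err = stop_two_cpus(cpu_a, cpu_b, swap_buffers, &ctx);
 *
 * Note that @fn runs on @cpu1 only (.active_cpus = cpumask_of(cpu1));
 * the other cpu merely spins in multi_cpu_stop() with irqs disabled.
 */
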
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

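/*
 * A minimal usage sketch, assuming a caller-owned work buffer with a
 * stable lifetime (e.g. per-cpu or embedded in a long-lived struct);
 * a stack buffer is only safe if it is guaranteed to outlive the
 * queued work.  The scheduler's active load balancing queues its work
 * this way:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, my_stop_work);	// hypothetical
 *
 *	if (!stop_one_cpu_nowait(cpu, my_fn, my_arg,
 *				 &per_cpu(my_stop_work, cpu)))
 *		...	// cpu was offline, my_fn will not run
 */
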
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

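/*
 * A minimal usage sketch, assuming a hypothetical callback that must run
 * on a set of cpus with each target monopolized.  Because all stop_cpus()
 * calls are serialized on stop_cpus_mutex, @fn may safely synchronize
 * with its instances running on the other cpus:
 *
 *	static int sync_counters(void *arg)	// hypothetical, must not sleep
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = stop_cpus(cpu_online_mask, sync_counters, NULL);
 */
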
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

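/*
 * A minimal sketch of the non-blocking variant, reusing the hypothetical
 * sync_counters() callback from the sketch above and assuming the caller
 * can back off when the facility is busy:
 *
 *	ret = try_stop_cpus(cpu_online_mask, sync_counters, NULL);
 *	if (ret == -EAGAIN)
 *		...	// someone else is stopping cpus; retry later
 */
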
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

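/*
 * A minimal usage sketch for stop_machine(), assuming a hypothetical
 * text-patching callback: every online cpu spins in multi_cpu_stop()
 * with irqs disabled while @fn runs, so no cpu can observe the patched
 * code half-written.  With @cpus == NULL, @fn runs on the first online
 * cpu:
 *
 *	static int patch_text(void *arg)	// hypothetical, must not sleep
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = stop_machine(patch_text, &desc, NULL);
 */
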
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
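
/*
 * A minimal sketch of when this variant is needed, assuming an arch
 * hotplug path (x86's MTRR rendezvous is the historical example) that
 * must synchronize all cpus while the local cpu is not yet active and
 * therefore cannot sleep in stop_machine():
 *
 *	// hypothetical callback, invoked on the incoming cpu during hotplug
 *	ret = stop_machine_from_inactive_cpu(sync_mtrr_state, &state, NULL);
 */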