/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion; callers must ensure @done is non-NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
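
/*
 * Usage sketch (illustrative only, not part of this file): run a
 * hypothetical callback on CPU 3 and collect its return value. The
 * callback must not sleep; it runs with the target cpu monopolized.
 *
 *	static int my_stop_fn(void *arg)
 *	{
 *		pr_info("stopper running on cpu %d\n", smp_processor_id());
 *		return 0;
 *	}
 *
 *	int err = stop_one_cpu(3, my_stop_fn, NULL);
 *
 * my_stop_fn is a made-up name, not an API of this file.
 */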

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function. */
	MULTI_STOP_RUN,
	/* Exit. */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
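
/*
 * Worked example (editorial note, not from the original source) of the
 * set_state()/ack_state() handshake with num_threads == 2:
 *
 *	set_state(PREPARE)	thread_ack = 2, state = PREPARE
 *	thread A sees PREPARE	ack_state(): thread_ack = 1
 *	thread B sees PREPARE	ack_state(): thread_ack = 0
 *				-> set_state(DISABLE_IRQ), thread_ack = 2
 *
 * The last thread to ack publishes the next state, so no thread can run
 * ahead of its peers; the smp_wmb() in set_state() orders the counter
 * reset before the new state becomes visible.
 */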

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing. Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This would cause us to never wake up the
	 * other stopper.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2; we hold both locks.
	 *
	 * It can be falsely true, but it is safe to spin until it is cleared;
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
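
/*
 * Worked example (editorial note) of the deadlock the above guards
 * against: if this function queued request A on cpu1 then cpu2 while a
 * racing __stop_cpus() queued request B on cpu2 then cpu1, stopper1
 * could run A's multi_cpu_stop() first while stopper2 runs B's. A would
 * then spin in MULTI_STOP_PREPARE waiting for cpu2, and B for cpu1,
 * neither of which can ever arrive. Holding both locks and spinning on
 * stop_cpus_in_progress enforces one global queueing order.
 */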

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1.
 *
 * Returns when both stops have completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
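
/*
 * Usage sketch (illustrative only): the scheduler's cross-CPU task swap
 * (migrate_swap()) is a typical caller. A hypothetical invocation with a
 * made-up callback:
 *
 *	static int swap_pair(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	int err = stop_two_cpus(cpu1, cpu2, swap_pair, NULL);
 *
 * @fn executes on @cpu1 while @cpu2 spins with interrupts disabled.
 */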

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
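
/*
 * Usage sketch (illustrative only): fire-and-forget from atomic context.
 * @work_buf must outlive the request, so it typically lives in per-cpu or
 * other long-lived storage. my_stop_fn and my_stop_work are made-up names:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, my_stop_work);
 *
 *	stop_one_cpu_nowait(cpu, my_stop_fn, NULL,
 *			    &per_cpu(my_stop_work, cpu));
 */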

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or any non-zero return value if any execution returned
 * non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
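
/*
 * Usage sketch (illustrative only, hypothetical callback): run a callback
 * on every online cpu, each with its cpu monopolized:
 *
 *	static int drain_local_state(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	int err = stop_cpus(cpu_online_mask, drain_local_state, NULL);
 *
 * Because all stop_cpus() calls are serialized, drain_local_state() could
 * safely spin waiting for its peers to start on the other cpus.
 */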

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or any
 * non-zero return value if any execution returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
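
/*
 * Usage sketch (illustrative only): non-blocking attempt with a caller-
 * chosen fallback instead of sleeping on stop_cpus_mutex. my_fn is a
 * made-up callback name:
 *
 *	ret = try_stop_cpus(cpu_online_mask, my_fn, NULL);
 *	if (ret == -EAGAIN)
 *		goto retry_later;
 */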

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks; until then it is fine to queue
	 * new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
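
/*
 * Usage sketch (illustrative only): the classic stop_machine() pattern is
 * changing a global invariant that no cpu may observe mid-update, e.g.
 * live code patching. With @cpus == NULL, @fn runs on the first cpu in
 * cpu_online_mask while every other online cpu spins with interrupts
 * disabled:
 *
 *	static int apply_update(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	int err = stop_machine(apply_update, NULL, NULL);
 *
 * apply_update is a made-up name for the callback.
 */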

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or any non-zero return value if
 * any execution returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
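
/*
 * Usage sketch (illustrative only): this variant exists for code running
 * mid-onlining, e.g. resyncing per-cpu hardware state such as x86 MTRRs,
 * where the local cpu is not yet active and cannot sleep:
 *
 *	stop_machine_from_inactive_cpu(resync_hw_state, NULL, NULL);
 *
 * resync_hw_state is a made-up callback name.
 */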