kernel/cpu.c
1da177e4
LT
1/* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
3 *
4 * This code is licensed under the GPL.
5 */
6#include <linux/proc_fs.h>
7#include <linux/smp.h>
8#include <linux/init.h>
9#include <linux/notifier.h>
10#include <linux/sched.h>
11#include <linux/unistd.h>
12#include <linux/cpu.h>
cb79295e
AV
13#include <linux/oom.h>
14#include <linux/rcupdate.h>
9984de1a 15#include <linux/export.h>
e4cc2f87 16#include <linux/bug.h>
1da177e4
LT
17#include <linux/kthread.h>
18#include <linux/stop_machine.h>
81615b62 19#include <linux/mutex.h>
5a0e3ad6 20#include <linux/gfp.h>
79cfbdfa 21#include <linux/suspend.h>
a19423b9 22#include <linux/lockdep.h>
345527b1 23#include <linux/tick.h>
a8994181 24#include <linux/irq.h>
4cb28ced 25#include <linux/smpboot.h>
cff7d378 26
bb3632c6 27#include <trace/events/power.h>
cff7d378
TG
28#define CREATE_TRACE_POINTS
29#include <trace/events/cpuhp.h>
1da177e4 30
38498a67
TG
31#include "smpboot.h"
32
cff7d378
TG
33/**
34 * cpuhp_cpu_state - Per cpu hotplug state storage
35 * @state: The current cpu state
36 * @target: The target state
4cb28ced
TG
37 * @thread: Pointer to the hotplug thread
38 * @should_run: Thread should execute
3b9d6da6 39 * @rollback: Perform a rollback
4cb28ced
TG
40 * @cb_state: The state for a single callback (install/uninstall)
41 * @cb: Single callback function (install/uninstall)
42 * @result: Result of the operation
43 * @done: Signal completion to the issuer of the task
cff7d378
TG
44 */
45struct cpuhp_cpu_state {
46 enum cpuhp_state state;
47 enum cpuhp_state target;
4cb28ced
TG
48#ifdef CONFIG_SMP
49 struct task_struct *thread;
50 bool should_run;
3b9d6da6 51 bool rollback;
4cb28ced
TG
52 enum cpuhp_state cb_state;
53 int (*cb)(unsigned int cpu);
54 int result;
55 struct completion done;
56#endif
cff7d378
TG
57};
58
59static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
60
61/**
62 * cpuhp_step - Hotplug state machine step
63 * @name: Name of the step
64 * @startup: Startup function of the step
65 * @teardown: Teardown function of the step
66 * @skip_onerr: Do not invoke the functions on error rollback
67 * Will go away once the notifiers are gone
757c989b 68 * @cant_stop: Bringup/teardown can't be stopped at this step
cff7d378
TG
69 */
70struct cpuhp_step {
71 const char *name;
72 int (*startup)(unsigned int cpu);
73 int (*teardown)(unsigned int cpu);
74 bool skip_onerr;
757c989b 75 bool cant_stop;
cff7d378
TG
76};
77
98f8cdce 78static DEFINE_MUTEX(cpuhp_state_mutex);
cff7d378 79static struct cpuhp_step cpuhp_bp_states[];
4baa0afc 80static struct cpuhp_step cpuhp_ap_states[];
cff7d378
TG
81
82/**
83 * cpuhp_invoke_callback - Invoke the callbacks for a given state
84 * @cpu: The cpu for which the callback should be invoked
85 * @step: The step in the state machine
86 * @cb: The callback function to invoke
87 *
88 * Called from cpu hotplug and from the state registration machinery
89 */
90static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
91 int (*cb)(unsigned int))
92{
93 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
94 int ret = 0;
95
96 if (cb) {
97 trace_cpuhp_enter(cpu, st->target, step, cb);
98 ret = cb(cpu);
99 trace_cpuhp_exit(cpu, st->state, step, ret);
100 }
101 return ret;
102}
103
98a79d6a 104#ifdef CONFIG_SMP
b3199c02 105/* Serializes the updates to cpu_online_mask, cpu_present_mask */
aa953877 106static DEFINE_MUTEX(cpu_add_remove_lock);
090e77c3
TG
107bool cpuhp_tasks_frozen;
108EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
1da177e4 109
79a6cdeb 110/*
93ae4f97
SB
111 * The following two APIs (cpu_maps_update_begin/done) must be used when
112 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
113 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
114 * hotplug callback (un)registration performed using __register_cpu_notifier()
115 * or __unregister_cpu_notifier().
79a6cdeb
LJ
116 */
117void cpu_maps_update_begin(void)
118{
119 mutex_lock(&cpu_add_remove_lock);
120}
93ae4f97 121EXPORT_SYMBOL(cpu_notifier_register_begin);
79a6cdeb
LJ
122
123void cpu_maps_update_done(void)
124{
125 mutex_unlock(&cpu_add_remove_lock);
126}
93ae4f97 127EXPORT_SYMBOL(cpu_notifier_register_done);
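/*
 * Illustrative registration pattern (a sketch, not part of this file;
 * "my_nb" and "my_init_cpu" are hypothetical): initialize the already
 * online CPUs and register a notifier without racing against hotplug:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		my_init_cpu(cpu);
 *	__register_cpu_notifier(&my_nb);
 *	cpu_notifier_register_done();
 */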
79a6cdeb 128
5c113fbe 129static RAW_NOTIFIER_HEAD(cpu_chain);
1da177e4 130
e3920fb4
RW
131/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
132 * Should always be manipulated under cpu_add_remove_lock
133 */
134static int cpu_hotplug_disabled;
135
79a6cdeb
LJ
136#ifdef CONFIG_HOTPLUG_CPU
137
d221938c
GS
138static struct {
139 struct task_struct *active_writer;
87af9e7f
DH
140 /* wait queue to wake up the active_writer */
141 wait_queue_head_t wq;
142 /* verifies that no writer will get active while readers are active */
143 struct mutex lock;
d221938c
GS
144 /*
145 * Also blocks the new readers during
146 * an ongoing cpu hotplug operation.
147 */
87af9e7f 148 atomic_t refcount;
a19423b9
GS
149
150#ifdef CONFIG_DEBUG_LOCK_ALLOC
151 struct lockdep_map dep_map;
152#endif
31950eb6
LT
153} cpu_hotplug = {
154 .active_writer = NULL,
87af9e7f 155 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
31950eb6 156 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
a19423b9
GS
157#ifdef CONFIG_DEBUG_LOCK_ALLOC
158 .dep_map = {.name = "cpu_hotplug.lock" },
159#endif
31950eb6 160};
d221938c 161
a19423b9
GS
162/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
163#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
dd56af42
PM
164#define cpuhp_lock_acquire_tryread() \
165 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
a19423b9
GS
166#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
167#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
168
62db99f4 169
86ef5c9a 170void get_online_cpus(void)
a9d9baa1 171{
d221938c
GS
172 might_sleep();
173 if (cpu_hotplug.active_writer == current)
aa953877 174 return;
a19423b9 175 cpuhp_lock_acquire_read();
d221938c 176 mutex_lock(&cpu_hotplug.lock);
87af9e7f 177 atomic_inc(&cpu_hotplug.refcount);
d221938c 178 mutex_unlock(&cpu_hotplug.lock);
a9d9baa1 179}
86ef5c9a 180EXPORT_SYMBOL_GPL(get_online_cpus);
90d45d17 181
86ef5c9a 182void put_online_cpus(void)
a9d9baa1 183{
87af9e7f
DH
184 int refcount;
185
d221938c 186 if (cpu_hotplug.active_writer == current)
aa953877 187 return;
075663d1 188
87af9e7f
DH
189 refcount = atomic_dec_return(&cpu_hotplug.refcount);
190 if (WARN_ON(refcount < 0)) /* try to fix things up */
191 atomic_inc(&cpu_hotplug.refcount);
192
193 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
194 wake_up(&cpu_hotplug.wq);
075663d1 195
a19423b9 196 cpuhp_lock_release();
d221938c 197
a9d9baa1 198}
86ef5c9a 199EXPORT_SYMBOL_GPL(put_online_cpus);
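/*
 * Illustrative reader-side usage (a sketch): any code that iterates
 * cpu_online_mask and must not race with a CPU coming or going wraps
 * itself in the pair above ("do_something" is hypothetical):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 */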
a9d9baa1 200
d221938c
GS
201/*
202 * This ensures that the hotplug operation can begin only when the
203 * refcount goes to zero.
204 *
205 * Note that during a cpu-hotplug operation, the new readers, if any,
206 * will be blocked by the cpu_hotplug.lock
207 *
d2ba7e2a
ON
208 * Since cpu_hotplug_begin() is always called after invoking
209 * cpu_maps_update_begin(), we can be sure that only one writer is active.
d221938c
GS
210 *
211 * Note that theoretically, there is a possibility of a livelock:
212 * - Refcount goes to zero, last reader wakes up the sleeping
213 * writer.
214 * - Last reader unlocks the cpu_hotplug.lock.
215 * - A new reader arrives at this moment, bumps up the refcount.
216 * - The writer acquires the cpu_hotplug.lock, finds the refcount
217 * non-zero and goes to sleep again.
218 *
219 * However, this is very difficult to achieve in practice since
86ef5c9a 220 * get_online_cpus() is not an API which is called all that often.
d221938c
GS
221 *
222 */
b9d10be7 223void cpu_hotplug_begin(void)
d221938c 224{
87af9e7f 225 DEFINE_WAIT(wait);
d2ba7e2a 226
87af9e7f 227 cpu_hotplug.active_writer = current;
a19423b9 228 cpuhp_lock_acquire();
87af9e7f 229
d2ba7e2a
ON
230 for (;;) {
231 mutex_lock(&cpu_hotplug.lock);
87af9e7f
DH
232 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
233 if (likely(!atomic_read(&cpu_hotplug.refcount)))
234 break;
d221938c
GS
235 mutex_unlock(&cpu_hotplug.lock);
236 schedule();
d221938c 237 }
87af9e7f 238 finish_wait(&cpu_hotplug.wq, &wait);
d221938c
GS
239}
240
b9d10be7 241void cpu_hotplug_done(void)
d221938c
GS
242{
243 cpu_hotplug.active_writer = NULL;
244 mutex_unlock(&cpu_hotplug.lock);
a19423b9 245 cpuhp_lock_release();
d221938c 246}
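/*
 * Writer-side shape for orientation (this is the call order actually
 * used by do_cpu_up()/do_cpu_down() and _cpu_up()/_cpu_down() below):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... run the hotplug state machine ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */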
79a6cdeb 247
16e53dbf
SB
248/*
249 * Wait for currently running CPU hotplug operations to complete (if any) and
250 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
251 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
252 * hotplug path before performing hotplug operations. So acquiring that lock
253 * guarantees mutual exclusion from any currently running hotplug operations.
254 */
255void cpu_hotplug_disable(void)
256{
257 cpu_maps_update_begin();
89af7ba5 258 cpu_hotplug_disabled++;
16e53dbf
SB
259 cpu_maps_update_done();
260}
32145c46 261EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
16e53dbf
SB
262
263void cpu_hotplug_enable(void)
264{
265 cpu_maps_update_begin();
89af7ba5 266 WARN_ON(--cpu_hotplug_disabled < 0);
16e53dbf
SB
267 cpu_maps_update_done();
268}
32145c46 269EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
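/*
 * Illustrative pairing (a sketch): callers such as the PM notifier at
 * the end of this file bracket a section that must not race with
 * sysfs-driven hotplug:
 *
 *	cpu_hotplug_disable();
 *	... hotplug-sensitive section ...
 *	cpu_hotplug_enable();
 */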
b9d10be7 270#endif /* CONFIG_HOTPLUG_CPU */
79a6cdeb 271
1da177e4 272/* Need to know about CPUs going up/down? */
71cf5aee 273int register_cpu_notifier(struct notifier_block *nb)
1da177e4 274{
bd5349cf 275 int ret;
d221938c 276 cpu_maps_update_begin();
bd5349cf 277 ret = raw_notifier_chain_register(&cpu_chain, nb);
d221938c 278 cpu_maps_update_done();
bd5349cf 279 return ret;
1da177e4 280}
65edc68c 281
71cf5aee 282int __register_cpu_notifier(struct notifier_block *nb)
93ae4f97
SB
283{
284 return raw_notifier_chain_register(&cpu_chain, nb);
285}
286
090e77c3 287static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
e9fb7631
AM
288 int *nr_calls)
289{
090e77c3
TG
290 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
291 void *hcpu = (void *)(long)cpu;
292
e6bde73b
AM
293 int ret;
294
090e77c3 295 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
e9fb7631 296 nr_calls);
e6bde73b
AM
297
298 return notifier_to_errno(ret);
e9fb7631
AM
299}
300
090e77c3 301static int cpu_notify(unsigned long val, unsigned int cpu)
e9fb7631 302{
090e77c3 303 return __cpu_notify(val, cpu, -1, NULL);
e9fb7631
AM
304}
305
3b9d6da6
SAS
306static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
307{
308 BUG_ON(cpu_notify(val, cpu));
309}
310
ba997462
TG
311/* Notifier wrappers for transitioning to state machine */
312static int notify_prepare(unsigned int cpu)
313{
314 int nr_calls = 0;
315 int ret;
316
317 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
318 if (ret) {
319 nr_calls--;
320 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
321 __func__, cpu);
322 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
323 }
324 return ret;
325}
326
327static int notify_online(unsigned int cpu)
328{
329 cpu_notify(CPU_ONLINE, cpu);
330 return 0;
331}
332
4baa0afc
TG
333static int notify_starting(unsigned int cpu)
334{
335 cpu_notify(CPU_STARTING, cpu);
336 return 0;
337}
338
8df3e07e
TG
339static int bringup_wait_for_ap(unsigned int cpu)
340{
341 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
342
343 wait_for_completion(&st->done);
344 return st->result;
345}
346
ba997462
TG
347static int bringup_cpu(unsigned int cpu)
348{
349 struct task_struct *idle = idle_thread_get(cpu);
350 int ret;
351
352 /* Arch-specific enabling code. */
353 ret = __cpu_up(cpu, idle);
354 if (ret) {
355 cpu_notify(CPU_UP_CANCELED, cpu);
356 return ret;
357 }
8df3e07e 358 ret = bringup_wait_for_ap(cpu);
ba997462 359 BUG_ON(!cpu_online(cpu));
8df3e07e 360 return ret;
ba997462
TG
361}
362
2e1a3483
TG
363/*
364 * Hotplug state machine related functions
365 */
366static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
367 struct cpuhp_step *steps)
368{
369 for (st->state++; st->state < st->target; st->state++) {
370 struct cpuhp_step *step = steps + st->state;
371
372 if (!step->skip_onerr)
373 cpuhp_invoke_callback(cpu, st->state, step->startup);
374 }
375}
376
377static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
378 struct cpuhp_step *steps, enum cpuhp_state target)
379{
380 enum cpuhp_state prev_state = st->state;
381 int ret = 0;
382
383 for (; st->state > target; st->state--) {
384 struct cpuhp_step *step = steps + st->state;
385
386 ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
387 if (ret) {
388 st->target = prev_state;
389 undo_cpu_down(cpu, st, steps);
390 break;
391 }
392 }
393 return ret;
394}
395
396static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
397 struct cpuhp_step *steps)
398{
399 for (st->state--; st->state > st->target; st->state--) {
400 struct cpuhp_step *step = steps + st->state;
401
402 if (!step->skip_onerr)
403 cpuhp_invoke_callback(cpu, st->state, step->teardown);
404 }
405}
406
407static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
408 struct cpuhp_step *steps, enum cpuhp_state target)
409{
410 enum cpuhp_state prev_state = st->state;
411 int ret = 0;
412
413 while (st->state < target) {
414 struct cpuhp_step *step;
415
416 st->state++;
417 step = steps + st->state;
418 ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
419 if (ret) {
420 st->target = prev_state;
421 undo_cpu_up(cpu, st, steps);
422 break;
423 }
424 }
425 return ret;
426}
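/*
 * Worked example (illustrative): with st->state == CPUHP_OFFLINE and
 * target == CPUHP_BRINGUP_CPU, cpuhp_up_callbacks() invokes the startup
 * callback of every state in ascending order. If the callback of some
 * state S fails, st->target is reset to the previous state and
 * undo_cpu_up() walks back down from S - 1, invoking the teardown
 * callbacks of the states whose startup already ran (unless skip_onerr
 * is set). cpuhp_down_callbacks()/undo_cpu_down() mirror this for the
 * teardown direction.
 */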
427
4cb28ced
TG
428/*
429 * The cpu hotplug threads manage the bringup and teardown of the cpus
430 */
431static void cpuhp_create(unsigned int cpu)
432{
433 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
434
435 init_completion(&st->done);
436}
437
438static int cpuhp_should_run(unsigned int cpu)
439{
440 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
441
442 return st->should_run;
443}
444
445/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
446static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
447{
1cf4f629 448 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
4cb28ced
TG
449
450 return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
451}
452
453/* Execute the online startup callbacks. Used to be CPU_ONLINE */
454static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
455{
456 return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
457}
458
459/*
460 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
461 * callbacks when a state gets [un]installed at runtime.
462 */
463static void cpuhp_thread_fun(unsigned int cpu)
464{
465 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
466 int ret = 0;
467
468 /*
469 * Paired with the mb() in cpuhp_kick_ap_work and
470 * cpuhp_invoke_ap_callback, so the work being set up is consistently visible.
471 */
472 smp_mb();
473 if (!st->should_run)
474 return;
475
476 st->should_run = false;
477
478 /* Single callback invocation for [un]install ? */
479 if (st->cb) {
480 if (st->cb_state < CPUHP_AP_ONLINE) {
481 local_irq_disable();
482 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
483 local_irq_enable();
484 } else {
485 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
486 }
3b9d6da6
SAS
487 } else if (st->rollback) {
488 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
489
490 undo_cpu_down(cpu, st, cpuhp_ap_states);
491 /*
492 * This is a momentary workaround to keep the notifier users
493 * happy. Will go away once we got rid of the notifiers.
494 */
495 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
496 st->rollback = false;
4cb28ced 497 } else {
1cf4f629 498 /* Cannot happen .... */
8df3e07e 499 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
1cf4f629 500
4cb28ced
TG
501 /* Regular hotplug work */
502 if (st->state < st->target)
503 ret = cpuhp_ap_online(cpu, st);
504 else if (st->state > st->target)
505 ret = cpuhp_ap_offline(cpu, st);
506 }
507 st->result = ret;
508 complete(&st->done);
509}
510
511/* Invoke a single callback on a remote cpu */
512static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
513 int (*cb)(unsigned int))
514{
515 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
516
517 if (!cpu_online(cpu))
518 return 0;
519
6a4e2451
TG
520 /*
521 * If we are up and running, use the hotplug thread. For early calls
522 * we invoke the thread function directly.
523 */
524 if (!st->thread)
525 return cpuhp_invoke_callback(cpu, state, cb);
526
4cb28ced
TG
527 st->cb_state = state;
528 st->cb = cb;
529 /*
530 * Make sure the above stores are visible before should_run becomes
531 * true. Paired with the mb() above in cpuhp_thread_fun()
532 */
533 smp_mb();
534 st->should_run = true;
535 wake_up_process(st->thread);
536 wait_for_completion(&st->done);
537 return st->result;
538}
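/*
 * Ordering sketch for the should_run protocol above (this illustrates
 * the barriers already present in the code, it adds no new rules):
 *
 *	issuing CPU:			hotplug thread:
 *	st->cb_state = state;		smp_mb();
 *	st->cb = cb;			if (!st->should_run)
 *	smp_mb();				return;
 *	st->should_run = true;		st->should_run = false;
 *	wake_up_process(st->thread);	reads st->cb, st->cb_state
 */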
539
540/* Regular hotplug invocation of the AP hotplug thread */
1cf4f629 541static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
4cb28ced 542{
4cb28ced
TG
543 st->result = 0;
544 st->cb = NULL;
545 /*
546 * Make sure the above stores are visible before should_run becomes
547 * true. Paired with the mb() above in cpuhp_thread_fun()
548 */
549 smp_mb();
550 st->should_run = true;
551 wake_up_process(st->thread);
1cf4f629
TG
552}
553
554static int cpuhp_kick_ap_work(unsigned int cpu)
555{
556 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
557 enum cpuhp_state state = st->state;
558
559 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
560 __cpuhp_kick_ap_work(st);
4cb28ced
TG
561 wait_for_completion(&st->done);
562 trace_cpuhp_exit(cpu, st->state, state, st->result);
563 return st->result;
564}
565
566static struct smp_hotplug_thread cpuhp_threads = {
567 .store = &cpuhp_state.thread,
568 .create = &cpuhp_create,
569 .thread_should_run = cpuhp_should_run,
570 .thread_fn = cpuhp_thread_fun,
571 .thread_comm = "cpuhp/%u",
572 .selfparking = true,
573};
574
575void __init cpuhp_threads_init(void)
576{
577 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
578 kthread_unpark(this_cpu_read(cpuhp_state.thread));
579}
580
00b9b0af 581#ifdef CONFIG_HOTPLUG_CPU
1da177e4 582EXPORT_SYMBOL(register_cpu_notifier);
93ae4f97 583EXPORT_SYMBOL(__register_cpu_notifier);
71cf5aee 584void unregister_cpu_notifier(struct notifier_block *nb)
1da177e4 585{
d221938c 586 cpu_maps_update_begin();
bd5349cf 587 raw_notifier_chain_unregister(&cpu_chain, nb);
d221938c 588 cpu_maps_update_done();
1da177e4
LT
589}
590EXPORT_SYMBOL(unregister_cpu_notifier);
591
71cf5aee 592void __unregister_cpu_notifier(struct notifier_block *nb)
93ae4f97
SB
593{
594 raw_notifier_chain_unregister(&cpu_chain, nb);
595}
596EXPORT_SYMBOL(__unregister_cpu_notifier);
597
e4cc2f87
AV
598/**
599 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
600 * @cpu: a CPU id
601 *
602 * This function walks all processes, finds a valid mm struct for each one and
603 * then clears a corresponding bit in mm's cpumask. While this all sounds
604 * trivial, there are various non-obvious corner cases, which this function
605 * tries to solve in a safe manner.
606 *
607 * Also note that the function uses a somewhat relaxed locking scheme, so it may
608 * be called only for an already offlined CPU.
609 */
cb79295e
AV
610void clear_tasks_mm_cpumask(int cpu)
611{
612 struct task_struct *p;
613
614 /*
615 * This function is called after the cpu is taken down and marked
616 * offline, so it's not like new tasks will ever get this cpu set in
617 * their mm mask. -- Peter Zijlstra
618 * Thus, we may use rcu_read_lock() here, instead of grabbing
619 * full-fledged tasklist_lock.
620 */
e4cc2f87 621 WARN_ON(cpu_online(cpu));
cb79295e
AV
622 rcu_read_lock();
623 for_each_process(p) {
624 struct task_struct *t;
625
e4cc2f87
AV
626 /*
627 * Main thread might exit, but other threads may still have
628 * a valid mm. Find one.
629 */
cb79295e
AV
630 t = find_lock_task_mm(p);
631 if (!t)
632 continue;
633 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
634 task_unlock(t);
635 }
636 rcu_read_unlock();
637}
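/*
 * Illustrative call site (a hypothetical sketch, not mandated by this
 * file): architecture code typically calls this from its CPU teardown
 * path once the CPU has been marked offline:
 *
 *	set_cpu_online(cpu, false);
 *	...
 *	clear_tasks_mm_cpumask(cpu);
 */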
638
b728ca06 639static inline void check_for_tasks(int dead_cpu)
1da177e4 640{
b728ca06 641 struct task_struct *g, *p;
1da177e4 642
a75a6068
ON
643 read_lock(&tasklist_lock);
644 for_each_process_thread(g, p) {
b728ca06
KT
645 if (!p->on_rq)
646 continue;
647 /*
648 * We do the check with task_rq(p)->lock unlocked.
649 * Order the reads so that we do not warn about a task
650 * which was running on this cpu in the past and has
651 * just been woken on another cpu.
652 */
653 rmb();
654 if (task_cpu(p) != dead_cpu)
655 continue;
656
657 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
658 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
a75a6068
ON
659 }
660 read_unlock(&tasklist_lock);
1da177e4
LT
661}
662
98458172
TG
663static int notify_down_prepare(unsigned int cpu)
664{
665 int err, nr_calls = 0;
666
667 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
668 if (err) {
669 nr_calls--;
670 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
671 pr_warn("%s: attempt to take down CPU %u failed\n",
672 __func__, cpu);
673 }
674 return err;
675}
676
4baa0afc
TG
677static int notify_dying(unsigned int cpu)
678{
679 cpu_notify(CPU_DYING, cpu);
680 return 0;
681}
682
1da177e4 683/* Take this CPU down. */
71cf5aee 684static int take_cpu_down(void *_param)
1da177e4 685{
4baa0afc
TG
686 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
687 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
090e77c3 688 int err, cpu = smp_processor_id();
1da177e4 689
1da177e4
LT
690 /* Ensure this CPU doesn't handle any more interrupts. */
691 err = __cpu_disable();
692 if (err < 0)
f3705136 693 return err;
1da177e4 694
4baa0afc
TG
695 /* Invoke the former CPU_DYING callbacks */
696 for (; st->state > target; st->state--) {
697 struct cpuhp_step *step = cpuhp_ap_states + st->state;
698
699 cpuhp_invoke_callback(cpu, st->state, step->teardown);
700 }
52c063d1
TG
701 /* Give up timekeeping duties */
702 tick_handover_do_timer();
14e568e7 703 /* Park the stopper thread */
090e77c3 704 stop_machine_park(cpu);
f3705136 705 return 0;
1da177e4
LT
706}
707
98458172 708static int takedown_cpu(unsigned int cpu)
1da177e4 709{
e69aab13 710 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
98458172 711 int err;
1da177e4 712
2a58c527 713 /* Park the smpboot threads */
1cf4f629 714 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
2a58c527 715 smpboot_park_threads(cpu);
1cf4f629 716
6acce3ef 717 /*
a8994181
TG
718 * Prevent irq alloc/free while the dying cpu reorganizes the
719 * interrupt affinities.
6acce3ef 720 */
a8994181 721 irq_lock_sparse();
6acce3ef 722
a8994181
TG
723 /*
724 * So now all preempt/rcu users must observe !cpu_active().
725 */
090e77c3 726 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
04321587 727 if (err) {
3b9d6da6 728 /* CPU refused to die */
a8994181 729 irq_unlock_sparse();
3b9d6da6
SAS
730 /* Unpark the hotplug thread so we can rollback there */
731 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
98458172 732 return err;
8fa1d7d3 733 }
04321587 734 BUG_ON(cpu_online(cpu));
1da177e4 735
48c5ccae
PZ
736 /*
737 * The migration_call() CPU_DYING callback will have removed all
738 * runnable tasks from the cpu, there's only the idle task left now
739 * that the migration thread is done doing the stop_machine thing.
51a96c77
PZ
740 *
741 * Wait for the stop thread to go away.
48c5ccae 742 */
e69aab13
TG
743 wait_for_completion(&st->done);
744 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1da177e4 745
a8994181
TG
746 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
747 irq_unlock_sparse();
748
345527b1 749 hotplug_cpu__broadcast_tick_pull(cpu);
1da177e4
LT
750 /* This actually kills the CPU. */
751 __cpu_die(cpu);
752
a49b116d 753 tick_cleanup_dead_cpu(cpu);
98458172
TG
754 return 0;
755}
1da177e4 756
98458172
TG
757static int notify_dead(unsigned int cpu)
758{
759 cpu_notify_nofail(CPU_DEAD, cpu);
1da177e4 760 check_for_tasks(cpu);
98458172
TG
761 return 0;
762}
763
71f87b2f
TG
764static void cpuhp_complete_idle_dead(void *arg)
765{
766 struct cpuhp_cpu_state *st = arg;
767
768 complete(&st->done);
769}
770
e69aab13
TG
771void cpuhp_report_idle_dead(void)
772{
773 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
774
775 BUG_ON(st->state != CPUHP_AP_OFFLINE);
27d50c7e 776 rcu_report_dead(smp_processor_id());
71f87b2f
TG
777 st->state = CPUHP_AP_IDLE_DEAD;
778 /*
779 * We cannot call complete() after rcu_report_dead(), so we delegate it
780 * to an online cpu.
781 */
782 smp_call_function_single(cpumask_first(cpu_online_mask),
783 cpuhp_complete_idle_dead, st, 0);
e69aab13
TG
784}
785
cff7d378
TG
786#else
787#define notify_down_prepare NULL
788#define takedown_cpu NULL
789#define notify_dead NULL
4baa0afc 790#define notify_dying NULL
cff7d378
TG
791#endif
792
793#ifdef CONFIG_HOTPLUG_CPU
cff7d378 794
98458172 795/* Requires cpu_add_remove_lock to be held */
af1f4045
TG
796static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
797 enum cpuhp_state target)
98458172 798{
cff7d378
TG
799 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
800 int prev_state, ret = 0;
801 bool hasdied = false;
98458172
TG
802
803 if (num_online_cpus() == 1)
804 return -EBUSY;
805
757c989b 806 if (!cpu_present(cpu))
98458172
TG
807 return -EINVAL;
808
809 cpu_hotplug_begin();
810
811 cpuhp_tasks_frozen = tasks_frozen;
812
cff7d378 813 prev_state = st->state;
af1f4045 814 st->target = target;
1cf4f629
TG
815 /*
816 * If the current CPU state is in the range of the AP hotplug thread,
817 * then we need to kick the thread.
818 */
8df3e07e 819 if (st->state > CPUHP_TEARDOWN_CPU) {
1cf4f629
TG
820 ret = cpuhp_kick_ap_work(cpu);
821 /*
822 * The AP side has done the error rollback already. Just
823 * return the error code.
824 */
825 if (ret)
826 goto out;
827
828 /*
829 * We might have stopped still in the range of the AP hotplug
830 * thread. Nothing to do anymore.
831 */
8df3e07e 832 if (st->state > CPUHP_TEARDOWN_CPU)
1cf4f629
TG
833 goto out;
834 }
835 /*
8df3e07e 836 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1cf4f629
TG
837 * to do the further cleanups.
838 */
2e1a3483 839 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
3b9d6da6
SAS
840 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
841 st->target = prev_state;
842 st->rollback = true;
843 cpuhp_kick_ap_work(cpu);
844 }
98458172 845
cff7d378 846 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1cf4f629 847out:
d221938c 848 cpu_hotplug_done();
cff7d378
TG
849 /* This post dead nonsense must die */
850 if (!ret && hasdied)
090e77c3 851 cpu_notify_nofail(CPU_POST_DEAD, cpu);
cff7d378 852 return ret;
e3920fb4
RW
853}
854
af1f4045 855static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
e3920fb4 856{
9ea09af3 857 int err;
e3920fb4 858
d221938c 859 cpu_maps_update_begin();
e761b772
MK
860
861 if (cpu_hotplug_disabled) {
e3920fb4 862 err = -EBUSY;
e761b772
MK
863 goto out;
864 }
865
af1f4045 866 err = _cpu_down(cpu, 0, target);
e3920fb4 867
e761b772 868out:
d221938c 869 cpu_maps_update_done();
1da177e4
LT
870 return err;
871}
af1f4045
TG
872int cpu_down(unsigned int cpu)
873{
874 return do_cpu_down(cpu, CPUHP_OFFLINE);
875}
b62b8ef9 876EXPORT_SYMBOL(cpu_down);
1da177e4
LT
877#endif /*CONFIG_HOTPLUG_CPU*/
878
4baa0afc
TG
879/**
880 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
881 * @cpu: cpu that just started
882 *
883 * This function calls the cpu_chain notifiers with CPU_STARTING.
884 * It must be called by the arch code on the new cpu, before the new cpu
885 * enables interrupts and before the "boot" cpu returns from __cpu_up().
886 */
887void notify_cpu_starting(unsigned int cpu)
888{
889 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
890 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
891
7ec99de3 892 rcu_cpu_starting(cpu); /* All CPU_STARTING notifiers can use RCU. */
4baa0afc
TG
893 while (st->state < target) {
894 struct cpuhp_step *step;
895
896 st->state++;
897 step = cpuhp_ap_states + st->state;
898 cpuhp_invoke_callback(cpu, st->state, step->startup);
899 }
900}
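/*
 * Illustrative arch-side ordering (a sketch; the function name is
 * hypothetical): the new CPU invokes this before marking itself online
 * and enabling interrupts:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */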
901
949338e3
TG
902/*
903 * Called from the idle task. We need to set active here, so we can kick off
8df3e07e
TG
904 * the stopper thread and unpark the smpboot threads. If the target state is
905 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up the
906 * cpu further.
949338e3 907 */
8df3e07e 908void cpuhp_online_idle(enum cpuhp_state state)
949338e3 909{
8df3e07e
TG
910 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
911 unsigned int cpu = smp_processor_id();
912
913 /* Happens for the boot cpu */
914 if (state != CPUHP_AP_ONLINE_IDLE)
915 return;
916
917 st->state = CPUHP_AP_ONLINE_IDLE;
1cf4f629 918
8df3e07e 919 /* Unpark the stopper thread and the hotplug thread of this cpu */
949338e3 920 stop_machine_unpark(cpu);
1cf4f629 921 kthread_unpark(st->thread);
8df3e07e
TG
922
923 /* Should we go further up ? */
924 if (st->target > CPUHP_AP_ONLINE_IDLE)
925 __cpuhp_kick_ap_work(st);
926 else
927 complete(&st->done);
949338e3
TG
928}
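/*
 * Illustrative caller (a simplified sketch): the generic idle entry
 * path hands the hotplug state over before entering the idle loop:
 *
 *	void cpu_startup_entry(enum cpuhp_state state)
 *	{
 *		cpuhp_online_idle(state);
 *		while (1)
 *			... idle loop ...
 *	}
 */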
929
e3920fb4 930/* Requires cpu_add_remove_lock to be held */
af1f4045 931static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1da177e4 932{
cff7d378 933 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
3bb5d2ee 934 struct task_struct *idle;
2e1a3483 935 int ret = 0;
1da177e4 936
d221938c 937 cpu_hotplug_begin();
38498a67 938
757c989b 939 if (!cpu_present(cpu)) {
5e5041f3
YI
940 ret = -EINVAL;
941 goto out;
942 }
943
757c989b
TG
944 /*
945 * The caller of do_cpu_up might have raced with another
946 * caller. Ignore it for now.
947 */
948 if (st->state >= target)
38498a67 949 goto out;
757c989b
TG
950
951 if (st->state == CPUHP_OFFLINE) {
952 /* Let it fail before we try to bring the cpu up */
953 idle = idle_thread_get(cpu);
954 if (IS_ERR(idle)) {
955 ret = PTR_ERR(idle);
956 goto out;
957 }
3bb5d2ee 958 }
38498a67 959
ba997462
TG
960 cpuhp_tasks_frozen = tasks_frozen;
961
af1f4045 962 st->target = target;
1cf4f629
TG
963 /*
964 * If the current CPU state is in the range of the AP hotplug thread,
965 * then we need to kick the thread once more.
966 */
8df3e07e 967 if (st->state > CPUHP_BRINGUP_CPU) {
1cf4f629
TG
968 ret = cpuhp_kick_ap_work(cpu);
969 /*
970 * The AP side has done the error rollback already. Just
971 * return the error code.
972 */
973 if (ret)
974 goto out;
975 }
976
977 /*
978 * Try to reach the target state. We max out on the BP at
8df3e07e 979 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1cf4f629
TG
980 * responsible for bringing it up to the target state.
981 */
8df3e07e 982 target = min((int)target, CPUHP_BRINGUP_CPU);
2e1a3483 983 ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
38498a67 984out:
d221938c 985 cpu_hotplug_done();
e3920fb4
RW
986 return ret;
987}
988
af1f4045 989static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
e3920fb4
RW
990{
991 int err = 0;
cf23422b 992
e0b582ec 993 if (!cpu_possible(cpu)) {
84117da5
FF
994 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
995 cpu);
87d5e023 996#if defined(CONFIG_IA64)
84117da5 997 pr_err("please check additional_cpus= boot parameter\n");
73e753a5
KH
998#endif
999 return -EINVAL;
1000 }
e3920fb4 1001
01b0f197
TK
1002 err = try_online_node(cpu_to_node(cpu));
1003 if (err)
1004 return err;
cf23422b 1005
d221938c 1006 cpu_maps_update_begin();
e761b772
MK
1007
1008 if (cpu_hotplug_disabled) {
e3920fb4 1009 err = -EBUSY;
e761b772
MK
1010 goto out;
1011 }
1012
af1f4045 1013 err = _cpu_up(cpu, 0, target);
e761b772 1014out:
d221938c 1015 cpu_maps_update_done();
e3920fb4
RW
1016 return err;
1017}
af1f4045
TG
1018
1019int cpu_up(unsigned int cpu)
1020{
1021 return do_cpu_up(cpu, CPUHP_ONLINE);
1022}
a513f6ba 1023EXPORT_SYMBOL_GPL(cpu_up);
e3920fb4 1024
f3de4be9 1025#ifdef CONFIG_PM_SLEEP_SMP
e0b582ec 1026static cpumask_var_t frozen_cpus;
e3920fb4
RW
1027
1028int disable_nonboot_cpus(void)
1029{
e9a5f426 1030 int cpu, first_cpu, error = 0;
e3920fb4 1031
d221938c 1032 cpu_maps_update_begin();
e0b582ec 1033 first_cpu = cpumask_first(cpu_online_mask);
9ee349ad
XF
1034 /*
1035 * We take down all of the non-boot CPUs in one shot to avoid races
e3920fb4
RW
1036 * with userspace trying to use CPU hotplug at the same time.
1037 */
e0b582ec 1038 cpumask_clear(frozen_cpus);
6ad4c188 1039
84117da5 1040 pr_info("Disabling non-boot CPUs ...\n");
e3920fb4
RW
1041 for_each_online_cpu(cpu) {
1042 if (cpu == first_cpu)
1043 continue;
bb3632c6 1044 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
af1f4045 1045 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
bb3632c6 1046 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
feae3203 1047 if (!error)
e0b582ec 1048 cpumask_set_cpu(cpu, frozen_cpus);
feae3203 1049 else {
84117da5 1050 pr_err("Error taking CPU%d down: %d\n", cpu, error);
e3920fb4
RW
1051 break;
1052 }
1053 }
86886e55 1054
89af7ba5 1055 if (!error)
e3920fb4 1056 BUG_ON(num_online_cpus() > 1);
89af7ba5 1057 else
84117da5 1058 pr_err("Non-boot CPUs are not disabled\n");
89af7ba5
VK
1059
1060 /*
1061 * Make sure the CPUs won't be enabled by someone else. We need to do
1062 * this even in case of failure as all disable_nonboot_cpus() users are
1063 * supposed to do enable_nonboot_cpus() on the failure path.
1064 */
1065 cpu_hotplug_disabled++;
1066
d221938c 1067 cpu_maps_update_done();
e3920fb4
RW
1068 return error;
1069}
1070
d0af9eed
SS
1071void __weak arch_enable_nonboot_cpus_begin(void)
1072{
1073}
1074
1075void __weak arch_enable_nonboot_cpus_end(void)
1076{
1077}
1078
71cf5aee 1079void enable_nonboot_cpus(void)
e3920fb4
RW
1080{
1081 int cpu, error;
1082
1083 /* Allow everyone to use the CPU hotplug again */
d221938c 1084 cpu_maps_update_begin();
89af7ba5 1085 WARN_ON(--cpu_hotplug_disabled < 0);
e0b582ec 1086 if (cpumask_empty(frozen_cpus))
1d64b9cb 1087 goto out;
e3920fb4 1088
84117da5 1089 pr_info("Enabling non-boot CPUs ...\n");
d0af9eed
SS
1090
1091 arch_enable_nonboot_cpus_begin();
1092
e0b582ec 1093 for_each_cpu(cpu, frozen_cpus) {
bb3632c6 1094 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
af1f4045 1095 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
bb3632c6 1096 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
e3920fb4 1097 if (!error) {
84117da5 1098 pr_info("CPU%d is up\n", cpu);
e3920fb4
RW
1099 continue;
1100 }
84117da5 1101 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
e3920fb4 1102 }
d0af9eed
SS
1103
1104 arch_enable_nonboot_cpus_end();
1105
e0b582ec 1106 cpumask_clear(frozen_cpus);
1d64b9cb 1107out:
d221938c 1108 cpu_maps_update_done();
1da177e4 1109}
e0b582ec 1110
d7268a31 1111static int __init alloc_frozen_cpus(void)
e0b582ec
RR
1112{
1113 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1114 return -ENOMEM;
1115 return 0;
1116}
1117core_initcall(alloc_frozen_cpus);
79cfbdfa 1118
79cfbdfa
SB
1119/*
1120 * When callbacks for CPU hotplug notifications are being executed, we must
1121 * ensure that the state of the system with respect to the tasks being frozen
1122 * or not, as reported by the notification, remains unchanged *throughout the
1123 * duration* of the execution of the callbacks.
1124 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1125 *
1126 * This synchronization is implemented by mutually excluding regular CPU
1127 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1128 * Hibernate notifications.
1129 */
1130static int
1131cpu_hotplug_pm_callback(struct notifier_block *nb,
1132 unsigned long action, void *ptr)
1133{
1134 switch (action) {
1135
1136 case PM_SUSPEND_PREPARE:
1137 case PM_HIBERNATION_PREPARE:
16e53dbf 1138 cpu_hotplug_disable();
79cfbdfa
SB
1139 break;
1140
1141 case PM_POST_SUSPEND:
1142 case PM_POST_HIBERNATION:
16e53dbf 1143 cpu_hotplug_enable();
79cfbdfa
SB
1144 break;
1145
1146 default:
1147 return NOTIFY_DONE;
1148 }
1149
1150 return NOTIFY_OK;
1151}
1152
1153
d7268a31 1154static int __init cpu_hotplug_pm_sync_init(void)
79cfbdfa 1155{
6e32d479
FY
1156 /*
1157 * cpu_hotplug_pm_callback has higher priority than x86
1158 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1159 * to disable cpu hotplug to avoid cpu hotplug race.
1160 */
79cfbdfa
SB
1161 pm_notifier(cpu_hotplug_pm_callback, 0);
1162 return 0;
1163}
1164core_initcall(cpu_hotplug_pm_sync_init);
1165
f3de4be9 1166#endif /* CONFIG_PM_SLEEP_SMP */
68f4f1ec
MK
1167
1168#endif /* CONFIG_SMP */
b8d317d1 1169
cff7d378
TG
1170/* Boot processor state steps */
1171static struct cpuhp_step cpuhp_bp_states[] = {
1172 [CPUHP_OFFLINE] = {
1173 .name = "offline",
1174 .startup = NULL,
1175 .teardown = NULL,
1176 },
1177#ifdef CONFIG_SMP
1178 [CPUHP_CREATE_THREADS] = {
1179 .name = "threads:create",
1180 .startup = smpboot_create_threads,
1181 .teardown = NULL,
757c989b 1182 .cant_stop = true,
cff7d378 1183 },
00e16c3d
TG
1184 [CPUHP_PERF_PREPARE] = {
1185 .name = "perf prepare",
1186 .startup = perf_event_init_cpu,
1187 .teardown = perf_event_exit_cpu,
1188 },
7ee681b2
TG
1189 [CPUHP_WORKQUEUE_PREP] = {
1190 .name = "workqueue prepare",
1191 .startup = workqueue_prepare_cpu,
1192 .teardown = NULL,
1193 },
27590dc1
TG
1194 [CPUHP_HRTIMERS_PREPARE] = {
1195 .name = "hrtimers prepare",
1196 .startup = hrtimers_prepare_cpu,
1197 .teardown = hrtimers_dead_cpu,
1198 },
31487f83
RW
1199 [CPUHP_SMPCFD_PREPARE] = {
1200 .name = "SMPCFD prepare",
1201 .startup = smpcfd_prepare_cpu,
1202 .teardown = smpcfd_dead_cpu,
1203 },
4df83742
TG
1204 [CPUHP_RCUTREE_PREP] = {
1205 .name = "RCU-tree prepare",
1206 .startup = rcutree_prepare_cpu,
1207 .teardown = rcutree_dead_cpu,
1208 },
d10ef6f9
TG
1209 /*
1210 * Preparatory and dead notifiers. Will be replaced once the notifiers
1211 * are converted to states.
1212 */
cff7d378
TG
1213 [CPUHP_NOTIFY_PREPARE] = {
1214 .name = "notify:prepare",
1215 .startup = notify_prepare,
1216 .teardown = notify_dead,
1217 .skip_onerr = true,
757c989b 1218 .cant_stop = true,
cff7d378 1219 },
4fae16df
RC
1220 /*
1221 * On the tear-down path, timers_dead_cpu() must be invoked
1222 * before blk_mq_queue_reinit_notify() from notify_dead(),
1223 * otherwise a RCU stall occurs.
1224 */
1225 [CPUHP_TIMERS_DEAD] = {
1226 .name = "timers dead",
1227 .startup = NULL,
1228 .teardown = timers_dead_cpu,
1229 },
d10ef6f9 1230 /* Kicks the plugged cpu into life */
cff7d378
TG
1231 [CPUHP_BRINGUP_CPU] = {
1232 .name = "cpu:bringup",
1233 .startup = bringup_cpu,
4baa0afc 1234 .teardown = NULL,
757c989b 1235 .cant_stop = true,
4baa0afc 1236 },
31487f83
RW
1237 [CPUHP_AP_SMPCFD_DYING] = {
1238 .startup = NULL,
1239 .teardown = smpcfd_dying_cpu,
1240 },
d10ef6f9
TG
1241 /*
1242 * Handled on the control processor until the plugged processor manages
1243 * this itself.
1244 */
4baa0afc
TG
1245 [CPUHP_TEARDOWN_CPU] = {
1246 .name = "cpu:teardown",
1247 .startup = NULL,
cff7d378 1248 .teardown = takedown_cpu,
757c989b 1249 .cant_stop = true,
cff7d378 1250 },
a7c73414
TG
1251#else
1252 [CPUHP_BRINGUP_CPU] = { },
cff7d378 1253#endif
cff7d378
TG
1254};
1255
4baa0afc
TG
1256/* Application processor state steps */
1257static struct cpuhp_step cpuhp_ap_states[] = {
1258#ifdef CONFIG_SMP
d10ef6f9
TG
1259 /* Final state before CPU kills itself */
1260 [CPUHP_AP_IDLE_DEAD] = {
1261 .name = "idle:dead",
1262 },
1263 /*
1264 * Last state before CPU enters the idle loop to die. Transient state
1265 * for synchronization.
1266 */
1267 [CPUHP_AP_OFFLINE] = {
1268 .name = "ap:offline",
1269 .cant_stop = true,
1270 },
9cf7243d
TG
1271 /* First state is scheduler control. Interrupts are disabled */
1272 [CPUHP_AP_SCHED_STARTING] = {
1273 .name = "sched:starting",
1274 .startup = sched_cpu_starting,
f2785ddb 1275 .teardown = sched_cpu_dying,
9cf7243d 1276 },
4df83742
TG
1277 [CPUHP_AP_RCUTREE_DYING] = {
1278 .startup = NULL,
1279 .teardown = rcutree_dying_cpu,
1280 },
d10ef6f9
TG
1281 /*
1282 * Low level startup/teardown notifiers. Run with interrupts
1283 * disabled. Will be removed once the notifiers are converted to
1284 * states.
1285 */
4baa0afc
TG
1286 [CPUHP_AP_NOTIFY_STARTING] = {
1287 .name = "notify:starting",
1288 .startup = notify_starting,
1289 .teardown = notify_dying,
1290 .skip_onerr = true,
757c989b 1291 .cant_stop = true,
4baa0afc 1292 },
d10ef6f9
TG
1293 /* Entry state on starting. Interrupts enabled from here on. Transient
1294 * state for synchronization */
1295 [CPUHP_AP_ONLINE] = {
1296 .name = "ap:online",
1297 },
1298 /* Handle smpboot threads park/unpark */
1cf4f629
TG
1299 [CPUHP_AP_SMPBOOT_THREADS] = {
1300 .name = "smpboot:threads",
1301 .startup = smpboot_unpark_threads,
2a58c527 1302 .teardown = NULL,
1cf4f629 1303 },
00e16c3d
TG
1304 [CPUHP_AP_PERF_ONLINE] = {
1305 .name = "perf online",
1306 .startup = perf_event_init_cpu,
1307 .teardown = perf_event_exit_cpu,
1308 },
7ee681b2
TG
1309 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1310 .name = "workqueue online",
1311 .startup = workqueue_online_cpu,
1312 .teardown = workqueue_offline_cpu,
1313 },
4df83742
TG
1314 [CPUHP_AP_RCUTREE_ONLINE] = {
1315 .name = "RCU-tree online",
1316 .startup = rcutree_online_cpu,
1317 .teardown = rcutree_offline_cpu,
1318 },
00e16c3d 1319
d10ef6f9
TG
1320 /*
1321 * Online/down_prepare notifiers. Will be removed once the notifiers
1322 * are converted to states.
1323 */
1cf4f629
TG
1324 [CPUHP_AP_NOTIFY_ONLINE] = {
1325 .name = "notify:online",
1326 .startup = notify_online,
1327 .teardown = notify_down_prepare,
3b9d6da6 1328 .skip_onerr = true,
1cf4f629 1329 },
4baa0afc 1330#endif
d10ef6f9
TG
1331 /*
1332 * The dynamically registered state space is here
1333 */
1334
aaddd7d1
TG
1335#ifdef CONFIG_SMP
1336 /* Last state is scheduler control setting the cpu active */
1337 [CPUHP_AP_ACTIVE] = {
1338 .name = "sched:active",
1339 .startup = sched_cpu_activate,
1340 .teardown = sched_cpu_deactivate,
1341 },
1342#endif
1343
d10ef6f9 1344 /* CPU is fully up and running. */
4baa0afc
TG
1345 [CPUHP_ONLINE] = {
1346 .name = "online",
1347 .startup = NULL,
1348 .teardown = NULL,
1349 },
1350};
1351
5b7aa87e
TG
1352/* Sanity check for callbacks */
1353static int cpuhp_cb_check(enum cpuhp_state state)
1354{
1355 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1356 return -EINVAL;
1357 return 0;
1358}
1359
98f8cdce
TG
1360static bool cpuhp_is_ap_state(enum cpuhp_state state)
1361{
d10ef6f9
TG
1362 /*
1363 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
1364 * purposes as that state is handled explicitly in cpu_down.
1365 */
1366 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
98f8cdce
TG
1367}
1368
1369static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
1370{
1371 struct cpuhp_step *sp;
1372
1373 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
1374 return sp + state;
1375}
1376
5b7aa87e
TG
1377static void cpuhp_store_callbacks(enum cpuhp_state state,
1378 const char *name,
1379 int (*startup)(unsigned int cpu),
1380 int (*teardown)(unsigned int cpu))
1381{
1382 /* (Un)Install the callbacks for further cpu hotplug operations */
1383 struct cpuhp_step *sp;
1384
1385 mutex_lock(&cpuhp_state_mutex);
1386 sp = cpuhp_get_step(state);
1387 sp->startup = startup;
1388 sp->teardown = teardown;
1389 sp->name = name;
1390 mutex_unlock(&cpuhp_state_mutex);
1391}
1392
1393static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1394{
1395 return cpuhp_get_step(state)->teardown;
1396}
1397
5b7aa87e
TG
1398/*
1399 * Call the startup/teardown function for a step either on the AP or
1400 * on the current CPU.
1401 */
1402static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1403 int (*cb)(unsigned int), bool bringup)
1404{
1405 int ret;
1406
1407 if (!cb)
1408 return 0;
5b7aa87e
TG
1409 /*
1410 * The non AP bound callbacks can fail on bringup. On teardown
1411 * e.g. module removal we crash for now.
1412 */
1cf4f629
TG
1413#ifdef CONFIG_SMP
1414 if (cpuhp_is_ap_state(state))
1415 ret = cpuhp_invoke_ap_callback(cpu, state, cb);
1416 else
1417 ret = cpuhp_invoke_callback(cpu, state, cb);
1418#else
1419 ret = cpuhp_invoke_callback(cpu, state, cb);
1420#endif
5b7aa87e
TG
1421 BUG_ON(ret && !bringup);
1422 return ret;
1423}
1424
1425/*
1426 * Called from __cpuhp_setup_state on a recoverable failure.
1427 *
1428 * Note: The teardown callbacks for rollback are not allowed to fail!
1429 */
1430static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1431 int (*teardown)(unsigned int cpu))
1432{
1433 int cpu;
1434
1435 if (!teardown)
1436 return;
1437
1438 /* Roll back the already executed steps on the other cpus */
1439 for_each_present_cpu(cpu) {
1440 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1441 int cpustate = st->state;
1442
1443 if (cpu >= failedcpu)
1444 break;
1445
1446 /* Did we invoke the startup call on that cpu ? */
1447 if (cpustate >= state)
1448 cpuhp_issue_call(cpu, state, teardown, false);
1449 }
1450}
1451
1452/*
1453 * Returns a free slot for dynamic assignment in the Online state range. The
1454 * states are protected by the cpuhp_state_mutex and an empty slot is identified
1455 * by having no name assigned.
1456 */
1457static int cpuhp_reserve_state(enum cpuhp_state state)
1458{
1459 enum cpuhp_state i;
1460
1461 mutex_lock(&cpuhp_state_mutex);
1cf4f629
TG
1462 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1463 if (cpuhp_ap_states[i].name)
5b7aa87e
TG
1464 continue;
1465
1cf4f629 1466 cpuhp_ap_states[i].name = "Reserved";
5b7aa87e
TG
1467 mutex_unlock(&cpuhp_state_mutex);
1468 return i;
1469 }
1470 mutex_unlock(&cpuhp_state_mutex);
1471 WARN(1, "No more dynamic states available for CPU hotplug\n");
1472 return -ENOSPC;
1473}
1474
1475/**
1476 * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1477 * @state: The state to setup
1478 * @invoke: If true, the startup function is invoked for cpus where
1479 * cpu state >= @state
1480 * @startup: startup callback function
1481 * @teardown: teardown callback function
1482 *
1483 * Returns 0 if successful, otherwise a proper error code
1484 */
1485int __cpuhp_setup_state(enum cpuhp_state state,
1486 const char *name, bool invoke,
1487 int (*startup)(unsigned int cpu),
1488 int (*teardown)(unsigned int cpu))
1489{
1490 int cpu, ret = 0;
1491 int dyn_state = 0;
1492
1493 if (cpuhp_cb_check(state) || !name)
1494 return -EINVAL;
1495
1496 get_online_cpus();
1497
1498 /* currently, dynamic slot assignment is only possible for the ONLINE state */
1cf4f629 1499 if (state == CPUHP_AP_ONLINE_DYN) {
5b7aa87e
TG
1500 dyn_state = 1;
1501 ret = cpuhp_reserve_state(state);
1502 if (ret < 0)
1503 goto out;
1504 state = ret;
1505 }
1506
1507 cpuhp_store_callbacks(state, name, startup, teardown);
1508
1509 if (!invoke || !startup)
1510 goto out;
1511
1512 /*
1513 * Try to call the startup callback for each present cpu
1514 * depending on the hotplug state of the cpu.
1515 */
1516 for_each_present_cpu(cpu) {
1517 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1518 int cpustate = st->state;
1519
1520 if (cpustate < state)
1521 continue;
1522
1523 ret = cpuhp_issue_call(cpu, state, startup, true);
1524 if (ret) {
1525 cpuhp_rollback_install(cpu, state, teardown);
1526 cpuhp_store_callbacks(state, NULL, NULL, NULL);
1527 goto out;
1528 }
1529 }
1530out:
1531 put_online_cpus();
1532 if (!ret && dyn_state)
1533 return state;
1534 return ret;
1535}
1536EXPORT_SYMBOL(__cpuhp_setup_state);
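/*
 * Illustrative registration (a sketch; "example_online" and
 * "example_dead" are hypothetical callbacks): requesting a slot in the
 * dynamic online range returns the allocated state number, which the
 * caller keeps for later removal:
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_dead(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				  true, example_online, example_dead);
 *	if (ret < 0)
 *		return ret;
 *	example_state = ret;
 */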
1537
1538/**
1539 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1540 * @state: The state to remove
1541 * @invoke: If true, the teardown function is invoked for cpus where
1542 * cpu state >= @state
1543 *
1544 * The teardown callback is currently not allowed to fail. Think
1545 * about module removal!
1546 */
1547void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1548{
1549 int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
1550 int cpu;
1551
1552 BUG_ON(cpuhp_cb_check(state));
1553
1554 get_online_cpus();
1555
1556 if (!invoke || !teardown)
1557 goto remove;
1558
1559 /*
1560 * Call the teardown callback for each present cpu depending
1561 * on the hotplug state of the cpu. This function is not
1562 * allowed to fail currently!
1563 */
1564 for_each_present_cpu(cpu) {
1565 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1566 int cpustate = st->state;
1567
1568 if (cpustate >= state)
1569 cpuhp_issue_call(cpu, state, teardown, false);
1570 }
1571remove:
1572 cpuhp_store_callbacks(state, NULL, NULL, NULL);
1573 put_online_cpus();
1574}
1575EXPORT_SYMBOL(__cpuhp_remove_state);
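/*
 * Illustrative removal (a sketch continuing the example above): run the
 * teardown on all CPUs that reached the state, then free the slot:
 *
 *	__cpuhp_remove_state(example_state, true);
 */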
1576
98f8cdce
TG
1577#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1578static ssize_t show_cpuhp_state(struct device *dev,
1579 struct device_attribute *attr, char *buf)
1580{
1581 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1582
1583 return sprintf(buf, "%d\n", st->state);
1584}
1585static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1586
757c989b
TG
1587static ssize_t write_cpuhp_target(struct device *dev,
1588 struct device_attribute *attr,
1589 const char *buf, size_t count)
1590{
1591 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1592 struct cpuhp_step *sp;
1593 int target, ret;
1594
1595 ret = kstrtoint(buf, 10, &target);
1596 if (ret)
1597 return ret;
1598
1599#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1600 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1601 return -EINVAL;
1602#else
1603 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1604 return -EINVAL;
1605#endif
1606
1607 ret = lock_device_hotplug_sysfs();
1608 if (ret)
1609 return ret;
1610
1611 mutex_lock(&cpuhp_state_mutex);
1612 sp = cpuhp_get_step(target);
1613 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1614 mutex_unlock(&cpuhp_state_mutex);
1615 if (ret)
1616 return ret;
1617
1618 if (st->state < target)
1619 ret = do_cpu_up(dev->id, target);
1620 else
1621 ret = do_cpu_down(dev->id, target);
1622
1623 unlock_device_hotplug();
1624 return ret ? ret : count;
1625}
1626
98f8cdce
TG
1627static ssize_t show_cpuhp_target(struct device *dev,
1628 struct device_attribute *attr, char *buf)
1629{
1630 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1631
1632 return sprintf(buf, "%d\n", st->target);
1633}
757c989b 1634static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
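/*
 * Illustrative sysfs usage (shell sketch; the numeric state values
 * depend on the kernel configuration, see the "states" file below):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Writing 0 (CPUHP_OFFLINE) requests a full teardown; writing the
 * CPUHP_ONLINE value brings the CPU fully up again. Intermediate
 * targets are accepted only with CONFIG_CPU_HOTPLUG_STATE_CONTROL.
 */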
98f8cdce
TG
1635
1636static struct attribute *cpuhp_cpu_attrs[] = {
1637 &dev_attr_state.attr,
1638 &dev_attr_target.attr,
1639 NULL
1640};
1641
1642static struct attribute_group cpuhp_cpu_attr_group = {
1643 .attrs = cpuhp_cpu_attrs,
1644 .name = "hotplug",
1646};
1647
1648static ssize_t show_cpuhp_states(struct device *dev,
1649 struct device_attribute *attr, char *buf)
1650{
1651 ssize_t cur, res = 0;
1652 int i;
1653
1654 mutex_lock(&cpuhp_state_mutex);
757c989b 1655 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
98f8cdce
TG
1656 struct cpuhp_step *sp = cpuhp_get_step(i);
1657
1658 if (sp->name) {
1659 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1660 buf += cur;
1661 res += cur;
1662 }
1663 }
1664 mutex_unlock(&cpuhp_state_mutex);
1665 return res;
1666}
1667static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1668
1669static struct attribute *cpuhp_cpu_root_attrs[] = {
1670 &dev_attr_states.attr,
1671 NULL
1672};
1673
1674static struct attribute_group cpuhp_cpu_root_attr_group = {
1675 .attrs = cpuhp_cpu_root_attrs,
1676 .name = "hotplug",
1678};
1679
1680static int __init cpuhp_sysfs_init(void)
1681{
1682 int cpu, ret;
1683
1684 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
1685 &cpuhp_cpu_root_attr_group);
1686 if (ret)
1687 return ret;
1688
1689 for_each_possible_cpu(cpu) {
1690 struct device *dev = get_cpu_device(cpu);
1691
1692 if (!dev)
1693 continue;
1694 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
1695 if (ret)
1696 return ret;
1697 }
1698 return 0;
1699}
1700device_initcall(cpuhp_sysfs_init);
1701#endif
1702
e56b3bc7
LT
1703/*
1704 * cpu_bit_bitmap[] is a special, "compressed" data structure that
1705 * represents all NR_CPUS-bit binary values of 1<<nr.
1706 *
e0b582ec 1707 * It is used by cpumask_of() to get a constant address to a CPU
e56b3bc7
LT
1708 * mask value that has a single bit set only.
1709 */
b8d317d1 1710
e56b3bc7 1711/* cpu_bit_bitmap[0] is empty - so we can back into it */
4d51985e 1712#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
e56b3bc7
LT
1713#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1714#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1715#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
b8d317d1 1716
e56b3bc7
LT
1717const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
1718
1719 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
1720 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
1721#if BITS_PER_LONG > 32
1722 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
1723 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
b8d317d1
MT
1724#endif
1725};
e56b3bc7 1726EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
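/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and
 * NR_CPUS > 64 so each row spans more than one long): cpumask_of(70)
 * computes p = cpu_bit_bitmap[1 + 70 % 64], then p -= 70 / 64.
 * Row [7] starts with 1UL << 6; backing up one long lands word 0 of
 * the mask on the all-zero tail of row [6], so word 1 of the mask is
 * 1UL << 6, i.e. exactly bit 70 is set and every other bit reads as
 * clear.
 */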
2d3854a3
RR
1727
1728const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1729EXPORT_SYMBOL(cpu_all_bits);
b3199c02
RR
1730
1731#ifdef CONFIG_INIT_ALL_POSSIBLE
4b804c85 1732struct cpumask __cpu_possible_mask __read_mostly
c4c54dd1 1733 = {CPU_BITS_ALL};
b3199c02 1734#else
4b804c85 1735struct cpumask __cpu_possible_mask __read_mostly;
b3199c02 1736#endif
4b804c85 1737EXPORT_SYMBOL(__cpu_possible_mask);
b3199c02 1738
4b804c85
RV
1739struct cpumask __cpu_online_mask __read_mostly;
1740EXPORT_SYMBOL(__cpu_online_mask);
b3199c02 1741
4b804c85
RV
1742struct cpumask __cpu_present_mask __read_mostly;
1743EXPORT_SYMBOL(__cpu_present_mask);
b3199c02 1744
4b804c85
RV
1745struct cpumask __cpu_active_mask __read_mostly;
1746EXPORT_SYMBOL(__cpu_active_mask);
3fa41520 1747
3fa41520
RR
1748void init_cpu_present(const struct cpumask *src)
1749{
c4c54dd1 1750 cpumask_copy(&__cpu_present_mask, src);
3fa41520
RR
1751}
1752
1753void init_cpu_possible(const struct cpumask *src)
1754{
c4c54dd1 1755 cpumask_copy(&__cpu_possible_mask, src);
3fa41520
RR
1756}
1757
1758void init_cpu_online(const struct cpumask *src)
1759{
c4c54dd1 1760 cpumask_copy(&__cpu_online_mask, src);
3fa41520 1761}
cff7d378
TG
1762
1763/*
1764 * Activate the first processor.
1765 */
1766void __init boot_cpu_init(void)
1767{
1768 int cpu = smp_processor_id();
1769
1770 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
1771 set_cpu_online(cpu, true);
1772 set_cpu_active(cpu, true);
1773 set_cpu_present(cpu, true);
1774 set_cpu_possible(cpu, true);
1775}
1776
1777/*
1778 * Must be called _AFTER_ setting up the per_cpu areas
1779 */
1780void __init boot_cpu_state_init(void)
1781{
1782 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
1783}