/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Remote CPU node for a single multi-instance callback invocation
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        bool                    single;
        bool                    bringup;
        struct hlist_node       *node;
        enum cpuhp_state        cb_state;
        int                     result;
        struct completion       done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
        const char              *name;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } startup;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
        struct hlist_head       list;
        bool                    skip_onerr;
        bool                    cant_stop;
        bool                    multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        /*
         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
         * purposes as that state is handled explicitly in cpu_down.
         */
        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        struct cpuhp_step *sp;

        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
        return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct cpuhp_step *step = cpuhp_get_step(state);
        int (*cbm)(unsigned int cpu, struct hlist_node *node);
        int (*cb)(unsigned int cpu);
        int ret, cnt;

        if (!step->multi_instance) {
                cb = bringup ? step->startup.single : step->teardown.single;
                if (!cb)
                        return 0;
                trace_cpuhp_enter(cpu, st->target, state, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }
        cbm = bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return 0;

        /* Single invocation for instance add/remove */
        if (node) {
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }

        /* State transition. Invoke on all instances */
        cnt = 0;
        hlist_for_each(node, &step->list) {
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                if (ret)
                        goto err;
                cnt++;
        }
        return 0;
err:
        /* Rollback the instances if one failed */
        cbm = !bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return ret;

        hlist_for_each(node, &step->list) {
                if (!cnt--)
                        break;
                cbm(cpu, node);
        }
        return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

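/*
 * Illustrative reader-side usage (an editor's sketch, not part of the
 * original file): code that must observe a stable cpu_online_mask
 * brackets itself with get_online_cpus()/put_online_cpus().
 * cpu_hotplug_begin() cannot proceed until all such readers have
 * dropped the refcount. The function name is hypothetical:
 *
 *	static unsigned int example_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		put_online_cpus();
 *		return cnt;
 *	}
 */
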
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
        if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
                return;
        cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        __cpu_hotplug_enable();
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;

        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
        int nr_calls = 0;
        int ret;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
        }
        return ret;
}

static int notify_online(unsigned int cpu)
{
        cpu_notify(CPU_ONLINE, cpu);
        return 0;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        wait_for_completion(&st->done);
        return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /*
         * Some architectures have to walk the irq descriptors to
         * setup the vector space for the cpu which comes online.
         * Prevent irq alloc/free across the bringup.
         */
        irq_lock_sparse();

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        irq_unlock_sparse();
        if (ret) {
                cpu_notify(CPU_UP_CANCELED, cpu);
                return ret;
        }
        ret = bringup_wait_for_ap(cpu);
        BUG_ON(!cpu_online(cpu));
        return ret;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        for (st->state++; st->state < st->target; st->state++) {
                struct cpuhp_step *step = cpuhp_get_step(st->state);

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, true, NULL);
        }
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_down(cpu, st);
                        break;
                }
        }
        return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        for (st->state--; st->state > st->target; st->state--) {
                struct cpuhp_step *step = cpuhp_get_step(st->state);

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, false, NULL);
        }
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st);
                        break;
                }
        }
        return ret;
}

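/*
 * Editor's note on the walk above: cpuhp_up_callbacks() steps st->state
 * upwards and invokes each state's bringup callback. If one fails,
 * st->target is reset to the previous state and undo_cpu_up() runs the
 * teardown callbacks of the already-completed states (skipping
 * skip_onerr ones) in descending order. cpuhp_down_callbacks() and
 * undo_cpu_down() are the mirror image for teardown.
 */
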
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

        return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        return cpuhp_up_callbacks(cpu, st, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        int ret = 0;

        /*
         * Paired with the mb() in cpuhp_kick_ap_work and
         * cpuhp_invoke_ap_callback, so the work set is consistently visible.
         */
        smp_mb();
        if (!st->should_run)
                return;

        st->should_run = false;

        /* Single callback invocation for [un]install ? */
        if (st->single) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
                        local_irq_disable();
                        ret = cpuhp_invoke_callback(cpu, st->cb_state,
                                                    st->bringup, st->node);
                        local_irq_enable();
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state,
                                                    st->bringup, st->node);
                }
        } else if (st->rollback) {
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                undo_cpu_down(cpu, st);
                /*
                 * This is a momentary workaround to keep the notifier users
                 * happy. Will go away once we get rid of the notifiers.
                 */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                /* Regular hotplug work */
                if (st->state < st->target)
                        ret = cpuhp_ap_online(cpu, st);
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
        st->result = ret;
        complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                         struct hlist_node *node)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        if (!cpu_online(cpu))
                return 0;

        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
         */
        if (!st->thread)
                return cpuhp_invoke_callback(cpu, state, bringup, node);

        st->cb_state = state;
        st->single = true;
        st->bringup = bringup;
        st->node = node;

        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_completion(&st->done);
        return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
        st->result = 0;
        st->single = false;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state state = st->state;

        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
        return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so we do not warn about a task
                 * which was running on this cpu in the past and has
                 * just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}

static int notify_down_prepare(unsigned int cpu)
{
        int err, nr_calls = 0;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
        }
        return err;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /*
         * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
         * do this step again.
         */
        WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
        st->state--;
        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--)
                cpuhp_invoke_callback(cpu, st->state, false, NULL);

        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}

static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_completion(&st->done);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        return 0;
}

static int notify_dead(unsigned int cpu)
{
        cpu_notify_nofail(CPU_DEAD, cpu);
        check_for_tasks(cpu);
        return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        rcu_report_dead(smp_processor_id());
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcu_report_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
        bool hasdied = false;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = st->state;
        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                st->target = prev_state;
                st->rollback = true;
                cpuhp_kick_ap_work(cpu);
        }

        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
        cpu_hotplug_done();
        /* This post dead nonsense must die */
        if (!ret && hasdied)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0, target);

out:
        cpu_maps_update_done();
        return err;
}

int cpu_down(unsigned int cpu)
{
        return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        while (st->state < target) {
                st->state++;
                cpuhp_invoke_callback(cpu, st->state, true, NULL);
        }
}

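/*
 * Illustrative arch-side call sequence (an editor's sketch; the actual
 * function names vary per architecture): the freshly booted AP invokes
 * its starting callbacks before enabling interrupts and then enters the
 * idle loop, which reports in via cpuhp_online_idle() below:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */
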
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up
 * the cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        unsigned int cpu = smp_processor_id();

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        st->state = CPUHP_AP_ONLINE_IDLE;

        /* Unpark the stopper thread and the hotplug thread of this cpu */
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);

        /* Should we go further up ? */
        if (st->target > CPUHP_AP_ONLINE_IDLE)
                __cpuhp_kick_ap_work(st);
        else
                complete(&st->done);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpu_hotplug_begin();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of do_cpu_up might have raced with another
         * caller. Ignore it for now.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, target);
out:
        cpu_hotplug_done();
        return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

int cpu_up(unsigned int cpu)
{
        return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

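/*
 * Illustrative kernel-internal usage (an editor's sketch, not part of
 * the original file): cycling a CPU. cpu_down() and cpu_up() take
 * cpu_add_remove_lock via cpu_maps_update_begin() themselves, so the
 * caller must not hold the hotplug locks. The helper name is made up:
 *
 *	static int example_cycle_cpu(unsigned int cpu)
 *	{
 *		int ret = cpu_down(cpu);	// targets CPUHP_OFFLINE
 *
 *		if (ret)
 *			return ret;
 *		return cpu_up(cpu);		// targets CPUHP_ONLINE
 *	}
 */
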
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        __cpu_hotplug_enable();
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
                .startup.single         = NULL,
                .teardown.single        = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
                .name                   = "threads:prepare",
                .startup.single         = smpboot_create_threads,
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
        [CPUHP_PERF_PREPARE] = {
                .name                   = "perf:prepare",
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_WORKQUEUE_PREP] = {
                .name                   = "workqueue:prepare",
                .startup.single         = workqueue_prepare_cpu,
                .teardown.single        = NULL,
        },
        [CPUHP_HRTIMERS_PREPARE] = {
                .name                   = "hrtimers:prepare",
                .startup.single         = hrtimers_prepare_cpu,
                .teardown.single        = hrtimers_dead_cpu,
        },
        [CPUHP_SMPCFD_PREPARE] = {
                .name                   = "smpcfd:prepare",
                .startup.single         = smpcfd_prepare_cpu,
                .teardown.single        = smpcfd_dead_cpu,
        },
        [CPUHP_RCUTREE_PREP] = {
                .name                   = "RCU/tree:prepare",
                .startup.single         = rcutree_prepare_cpu,
                .teardown.single        = rcutree_dead_cpu,
        },
        /*
         * Preparatory and dead notifiers. Will be replaced once the notifiers
         * are converted to states.
         */
        [CPUHP_NOTIFY_PREPARE] = {
                .name                   = "notify:prepare",
                .startup.single         = notify_prepare,
                .teardown.single        = notify_dead,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        /*
         * On the tear-down path, timers_dead_cpu() must be invoked
         * before blk_mq_queue_reinit_notify() from notify_dead(),
         * otherwise an RCU stall occurs.
         */
        [CPUHP_TIMERS_DEAD] = {
                .name                   = "timers:dead",
                .startup.single         = NULL,
                .teardown.single        = timers_dead_cpu,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
                .startup.single         = bringup_cpu,
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
        [CPUHP_AP_SMPCFD_DYING] = {
                .name                   = "smpcfd:dying",
                .startup.single         = NULL,
                .teardown.single        = smpcfd_dying_cpu,
        },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
                .startup.single         = NULL,
                .teardown.single        = takedown_cpu,
                .cant_stop              = true,
        },
#else
        [CPUHP_BRINGUP_CPU] = { },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
        /* Final state before CPU kills itself */
        [CPUHP_AP_IDLE_DEAD] = {
                .name                   = "idle:dead",
        },
        /*
         * Last state before CPU enters the idle loop to die. Transient state
         * for synchronization.
         */
        [CPUHP_AP_OFFLINE] = {
                .name                   = "ap:offline",
                .cant_stop              = true,
        },
        /* First state is scheduler control. Interrupts are disabled */
        [CPUHP_AP_SCHED_STARTING] = {
                .name                   = "sched:starting",
                .startup.single         = sched_cpu_starting,
                .teardown.single        = sched_cpu_dying,
        },
        [CPUHP_AP_RCUTREE_DYING] = {
                .name                   = "RCU/tree:dying",
                .startup.single         = NULL,
                .teardown.single        = rcutree_dying_cpu,
        },
        /*
         * Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization.
         */
        [CPUHP_AP_ONLINE] = {
                .name                   = "ap:online",
        },
        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot/threads:online",
                .startup.single         = smpboot_unpark_threads,
                .teardown.single        = NULL,
        },
        [CPUHP_AP_PERF_ONLINE] = {
                .name                   = "perf:online",
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name                   = "workqueue:online",
                .startup.single         = workqueue_online_cpu,
                .teardown.single        = workqueue_offline_cpu,
        },
        [CPUHP_AP_RCUTREE_ONLINE] = {
                .name                   = "RCU/tree:online",
                .startup.single         = rcutree_online_cpu,
                .teardown.single        = rcutree_offline_cpu,
        },

        /*
         * Online/down_prepare notifiers. Will be removed once the notifiers
         * are converted to states.
         */
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name                   = "notify:online",
                .startup.single         = notify_online,
                .teardown.single        = notify_down_prepare,
                .skip_onerr             = true,
        },
#endif
        /*
         * The dynamically registered state space is here
         */

#ifdef CONFIG_SMP
        /* Last state is scheduler control setting the cpu active */
        [CPUHP_AP_ACTIVE] = {
                .name                   = "sched:active",
                .startup.single         = sched_cpu_activate,
                .teardown.single        = sched_cpu_deactivate,
        },
#endif

        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name                   = "online",
                .startup.single         = NULL,
                .teardown.single        = NULL,
        },
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
                return -EINVAL;
        return 0;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
                                  const char *name,
                                  int (*startup)(unsigned int cpu),
                                  int (*teardown)(unsigned int cpu),
                                  bool multi_instance)
{
        /* (Un)Install the callbacks for further cpu hotplug operations */
        struct cpuhp_step *sp;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
        sp->startup.single = startup;
        sp->teardown.single = teardown;
        sp->name = name;
        sp->multi_instance = multi_instance;
        INIT_HLIST_HEAD(&sp->list);
        mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
        return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
                            struct hlist_node *node)
{
        struct cpuhp_step *sp = cpuhp_get_step(state);
        int ret;

        if ((bringup && !sp->startup.single) ||
            (!bringup && !sp->teardown.single))
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
         * e.g. module removal we crash for now.
         */
#ifdef CONFIG_SMP
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
        else
                ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
        ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
        BUG_ON(ret && !bringup);
        return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
                                   struct hlist_node *node)
{
        int cpu;

        /* Roll back the already executed steps on the other cpus */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpu >= failedcpu)
                        break;

                /* Did we invoke the startup call on that cpu ? */
                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, false, node);
        }
}

/*
 * Returns a free slot for dynamic state assignment in the Online range.
 * The states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
        enum cpuhp_state i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
                if (cpuhp_ap_states[i].name)
                        continue;

                cpuhp_ap_states[i].name = "Reserved";
                mutex_unlock(&cpuhp_state_mutex);
                return i;
        }
        mutex_unlock(&cpuhp_state_mutex);
        WARN(1, "No more dynamic states available for CPU hotplug\n");
        return -ENOSPC;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                               bool invoke)
{
        struct cpuhp_step *sp;
        int cpu;
        int ret;

        sp = cpuhp_get_step(state);
        if (!sp->multi_instance)
                return -EINVAL;

        get_online_cpus();

        if (!invoke || !sp->startup.multi)
                goto add_node;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, true, node);
                if (ret) {
                        if (sp->teardown.multi)
                                cpuhp_rollback_install(cpu, state, node);
                        goto err;
                }
        }
add_node:
        ret = 0;
        mutex_lock(&cpuhp_state_mutex);
        hlist_add_head(node, &sp->list);
        mutex_unlock(&cpuhp_state_mutex);

err:
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);

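/*
 * Illustrative multi-instance usage (an editor's sketch; the struct and
 * names below are made up): a state set up with multi_instance == true
 * keeps registered instances on sp->list and invokes the callback once
 * per instance. Instances are added through the
 * cpuhp_state_add_instance() wrapper from <linux/cpuhotplug.h>, which
 * funnels into __cpuhp_state_add_instance() above:
 *
 *	struct example_instance {
 *		struct hlist_node node;
 *	};
 *
 *	static int example_inst_online(unsigned int cpu,
 *				       struct hlist_node *node)
 *	{
 *		struct example_instance *inst =
 *			hlist_entry(node, struct example_instance, node);
 *
 *		pr_debug("cpu %u, inst %p\n", cpu, inst);
 *		return 0;
 *	}
 *
 *	ret = cpuhp_state_add_instance(example_state, &inst->node);
 */
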
/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @name:	Name of the callback (will be used in debug output)
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *		added afterwards
 *
 * Returns 0 if successful, otherwise a proper error code. For a dynamically
 * requested state (CPUHP_AP_ONLINE_DYN) the allocated state number is
 * returned on success.
 */
int __cpuhp_setup_state(enum cpuhp_state state,
                        const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu),
                        bool multi_instance)
{
        int cpu, ret = 0;
        int dyn_state = 0;

        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;

        get_online_cpus();

        /* currently assignments for the ONLINE state are possible */
        if (state == CPUHP_AP_ONLINE_DYN) {
                dyn_state = 1;
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
                state = ret;
        }

        cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);

        if (!invoke || !startup)
                goto out;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, true, NULL);
                if (ret) {
                        if (teardown)
                                cpuhp_rollback_install(cpu, state, NULL);
                        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
                        goto out;
                }
        }
out:
        put_online_cpus();
        if (!ret && dyn_state)
                return state;
        return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

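/*
 * Illustrative registration (an editor's sketch; all names are made up):
 * a subsystem claiming a slot in the dynamic range via the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>. For
 * CPUHP_AP_ONLINE_DYN the allocated state number is returned and must be
 * kept for a later cpuhp_remove_state():
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				example_online, example_offline);
 *	if (ret < 0)
 *		return ret;
 *	example_state = ret;
 */
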
int __cpuhp_state_remove_instance(enum cpuhp_state state,
                                  struct hlist_node *node, bool invoke)
{
        struct cpuhp_step *sp = cpuhp_get_step(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        if (!sp->multi_instance)
                return -EINVAL;

        get_online_cpus();
        if (!invoke || !cpuhp_get_teardown_cb(state))
                goto remove;
        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, false, node);
        }

remove:
        mutex_lock(&cpuhp_state_mutex);
        hlist_del(node);
        mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();

        return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
        struct cpuhp_step *sp = cpuhp_get_step(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        get_online_cpus();

        if (sp->multi_instance) {
                WARN(!hlist_empty(&sp->list),
                     "Error: Removing state %d which has instances left.\n",
                     state);
                goto remove;
        }

        if (!invoke || !cpuhp_get_teardown_cb(state))
                goto remove;

        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, false, NULL);
        }
remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
        put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
        int target, ret;

        ret = kstrtoint(buf, 10, &target);
        if (ret)
                return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
                return -EINVAL;
#else
        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
                return -EINVAL;
#endif

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(target);
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
                return ret;

        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);

        unlock_device_hotplug();
        return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

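/*
 * Illustrative sysfs usage of the attributes above (an editor's note;
 * paths assume the standard sysfs layout). CPUHP_OFFLINE is 0 and
 * CPUHP_ONLINE is the last state, so without
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL only those two targets are accepted:
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	cat /sys/devices/system/cpu/hotplug/states
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */
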
static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t cur, res = 0;
        int i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
                struct cpuhp_step *sp = cpuhp_get_step(i);

                if (sp->name) {
                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
                        buf += cur;
                        res += cur;
                }
        }
        mutex_unlock(&cpuhp_state_mutex);
        return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
        .attrs = cpuhp_cpu_root_attrs,
        .name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
        int cpu, ret;

        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct device *dev = get_cpu_device(cpu);

                if (!dev)
                        continue;
                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
                if (ret)
                        return ret;
        }
        return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
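
/*
 * Editor's note: cpumask_of() resolves to a constant pointer into
 * cpu_bit_bitmap[], so no single-bit mask has to be built at runtime;
 * see e.g. the stop_machine(take_cpu_down, NULL, cpumask_of(cpu)) call
 * in takedown_cpu() above.
 */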

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();

        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}