/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Instance node for a single multi-instance callback
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @list:	List head for the instances of a multi-instance state
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State is set up for multiple instances
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
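
/*
 * Usage sketch (illustrative, not part of the original file): readers
 * bracket sections that need a stable cpu_online_mask with the read side
 * of the percpu rwsem above, while the hotplug operations themselves take
 * cpus_write_lock():
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	// hypothetical per-cpu work
 *	cpus_read_unlock();
 */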

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
	}
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define takedown_cpu		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete(&st->done);
}
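
/*
 * The complete(&st->done) above is the AP half of the bringup handshake:
 * bringup_wait_for_ap() on the controlling CPU blocks on st->done and,
 * once woken, unparks the stopper and hotplug threads of the new CPU.
 */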

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
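
/*
 * Note (annotation): the common suspend/hibernate entry point
 * disable_nonboot_cpus() is, in this kernel's include/linux/cpu.h, a
 * wrapper around freeze_secondary_cpus(0); callers that must keep a
 * specific CPU alive pass its id as @primary instead.
 */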

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment (CPUHP_AP_ONLINE_DYN or
 * CPUHP_BP_PREPARE_DYN). The slots are protected by the cpuhp_state_mutex
 * and an empty slot is identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
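
/*
 * Usage sketch for a multi-instance state (illustrative; the "foo" names
 * are hypothetical): the subsystem embeds a hlist_node in its per-device
 * structure, registers the state once, then adds one instance per device:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				      foo_cpu_online, foo_cpu_offline);
 *	...
 *	cpuhp_state_add_instance(ret, &foo_dev->node);
 *
 * cpuhp_setup_state_multi() and cpuhp_state_add_instance() are the
 * include/linux/cpuhotplug.h wrappers around the functions in this file.
 */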

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
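
/*
 * Typical use (a sketch; cpuhp_setup_state() is the
 * include/linux/cpuhotplug.h wrapper that calls __cpuhp_setup_state()
 * with invoke == true; "subsys/foo" and the callbacks are hypothetical):
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_cpu_online, foo_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	hp_online = ret;	// dynamically allocated state number
 *
 * The returned state number is later handed to cpuhp_remove_state() to
 * unregister the callbacks again.
 */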

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
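
/*
 * From userspace this attribute appears as
 * /sys/devices/system/cpu/cpuN/hotplug/target. Without
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL only full offline/online are accepted,
 * e.g. (illustrative shell session; the online value is build-specific):
 *
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target    offline
 *	# echo 169 > /sys/devices/system/cpu/cpu1/hotplug/target  online
 *
 * where 169 stands in for the numeric value of CPUHP_ONLINE in a given
 * build; the valid numbers are listed in
 * /sys/devices/system/cpu/hotplug/states.
 */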

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
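
/*
 * Worked example: MASK_DECLARE_1(x) fills row x+1, word 0, with 1UL << x,
 * so row 0 stays all-zero. get_cpu_mask(cpu) in include/linux/cpumask.h
 * picks row 1 + cpu % BITS_PER_LONG and steps the pointer back by
 * cpu / BITS_PER_LONG words; the zero row in front provides the padding
 * this back-step relies on. E.g. on a 64-bit build, cpumask_of(5)
 * resolves to row 6, which has only bit 5 of word 0 set.
 */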

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}