/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	State to inject a callback failure into, CPUHP_INVALID if unused
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @booted_once: Tracks if the CPU was booted at least once
 * @node:	Remote CPU node for a single multi-instance callback
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};
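
/*
 * ->fail is consumed by cpuhp_invoke_callback(): if it matches the state
 * about to run, the callback is skipped and -EAGAIN is returned, which
 * exercises the rollback paths without needing a genuinely failing
 * callback. It is set from sysfs via write_cpuhp_fail() (see below).
 */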

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @list:	List head for the instances of multi-instance states
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
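
/*
 * Note: ->startup and ->teardown are unions. A state is either a plain
 * single-callback state or a multi-instance state (->multi_instance set),
 * never both, so the two callback signatures can share storage;
 * cpuhp_invoke_callback() picks the proper member.
 */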

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, which run with IRQs disabled and
 * must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
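
/*
 * "nosmt" on the command line soft-disables SMT (CPU_SMT_DISABLED);
 * "nosmt=force" disables it irrevocably: CPU_SMT_FORCE_DISABLED cannot be
 * downgraded by a later cpu_smt_disable() call, per the early return
 * above. Non-primary siblings are then refused in cpu_smt_allowed()
 * below, modulo the one-time bringup x86 needs in order to set CR4.MCE.
 */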

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
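
/*
 * Example of the above pair: a bringup from state A toward CPUHP_ONLINE
 * that fails at state F calls cpuhp_reset_state(), which flips ->bringup,
 * steps back below F (unless a multi-instance callback recorded partial
 * progress in ->last) and retargets A, so the same state machine then
 * walks the teardown callbacks back to where it started.
 */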

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the smp_mb() in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment. The slots are
 * protected by the cpuhp_state_mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
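
/*
 * Typical usage, as a sketch (the "xxx" names are placeholders): a
 * subsystem registers online/offline callbacks on a dynamically
 * allocated state through the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h>:
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/xxx:online",
 *				xxx_online_cpu, xxx_offline_cpu);
 *	if (ret < 0)
 *		return ret;
 *	xxx_hp_state = ret;	(the dynamic state number, kept for removal)
 *
 * and later removes it with cpuhp_remove_state(xxx_hp_state).
 */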

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

5b7aa87e 1794/**
71def423 1795 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
5b7aa87e
TG
1796 * @state: The state to remove
1797 * @invoke: If true, the teardown function is invoked for cpus where
1798 * cpu state >= @state
1799 *
71def423 1800 * The caller needs to hold cpus read locked while calling this function.
5b7aa87e
TG
1801 * The teardown callback is currently not allowed to fail. Think
1802 * about module removal!
1803 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
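
/*
 * Usage sketch (illustrative only), completing the setup example
 * above: remove the dynamic state on module unload. The teardown
 * callbacks are invoked for all cpus at or above the state, which is
 * what makes module removal safe.
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		cpuhp_remove_state(mydrv_hp_state);
 *	}
 */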

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
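
/*
 * The state/target pair is exposed per cpu under
 * /sys/devices/system/cpu/cpuN/hotplug/. Illustrative shell usage;
 * targets other than CPUHP_OFFLINE (0) and CPUHP_ONLINE are only
 * accepted with CONFIG_CPU_HOTPLUG_STATE_CONTROL:
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * The CPUHP_ONLINE number for onlining again can be read from the
 * last line of the global "states" file created below.
 */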

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
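
/*
 * Illustrative fault-injection usage: arm a non-atomic state to fail
 * and then drive a transition through it via the target file, which
 * exercises the rollback machinery. The state number written here is
 * an arbitrary example; pick one from the "states" file.
 *
 *	# echo 42 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */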

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}

static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. The cpu maps lock
		 * needs to be held as this might race against in-kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret) {
		cpu_smt_control = ctrlval;
		arch_smt_update();
	}
	cpu_maps_update_done();
	return ret;
}

static int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	arch_smt_update();
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
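
/*
 * Illustrative shell usage of the control file, created below as
 * /sys/devices/system/cpu/smt/control. "forceoff" is sticky: once
 * set, further writes fail with -EPERM.
 *
 *	# cat /sys/devices/system/cpu/smt/control
 *	on
 *	# echo off > /sys/devices/system/cpu/smt/control
 */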

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS single-bit values 1 << nr.
 *
 * It is used by cpumask_of() to get the constant address of a CPU
 * mask that has exactly one bit set.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
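
/*
 * Sketch of how cpumask_of() uses this table, mirroring
 * get_cpu_mask() in include/linux/cpumask.h: row (nr % BITS_PER_LONG) + 1
 * has word 0 equal to 1UL << (nr % BITS_PER_LONG); stepping the
 * pointer back by nr / BITS_PER_LONG words moves that word to the
 * right position within the mask, and all surrounding words are zero:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */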

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc. for the SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->booted_once = true;
#endif
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}

enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
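
/*
 * Kernel command line usage, matching the strcmp()s above (see also
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	mitigations=off		disable all optional CPU mitigations
 *	mitigations=auto	mitigate, SMT stays enabled (default)
 *	mitigations=auto,nosmt	mitigate, disable SMT if needed
 */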