/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

cpumask_t cpus_booted_once_mask;
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
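
/*
 * A minimal usage sketch (illustrative, not part of this file, guarded out
 * with #if 0): a caller that needs the set of online CPUs to stay stable
 * while it walks them brackets the walk with cpus_read_lock() and
 * cpus_read_unlock(). The function and per-CPU counter below are
 * hypothetical names, for illustration only.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, demo_counter);

static unsigned long demo_sum_online_counters(void)
{
	unsigned long sum = 0;
	int cpu;

	cpus_read_lock();	/* no CPU can come or go until we unlock */
	for_each_online_cpu(cpu)
		sum += per_cpu(demo_counter, cpu);
	cpus_read_unlock();

	return sum;
}
#endif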
void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
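
/*
 * Usage sketch (illustrative, not part of this file): temporarily blocking
 * hotplug around a sequence that cannot tolerate CPUs appearing or
 * vanishing. Every cpu_hotplug_disable() must be paired with
 * cpu_hotplug_enable(); the function below is hypothetical.
 */
#if 0
static void demo_fragile_reconfig(void)
{
	cpu_hotplug_disable();	/* cpu_up()/cpu_down() now fail with -EBUSY */
	/* ... perform the reconfiguration ... */
	cpu_hotplug_enable();
}
#endif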
#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);
	return 0;
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}
/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This mirrors what migrate_to_reboot_cpu() already does.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of cpu_up() might have raced with another
	 * caller. Nothing to do.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 */
int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}

int add_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}

void bringup_nonboot_cpus(unsigned int setup_max_cpus)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu, CPUHP_ONLINE);
	}
}
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}

		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all freeze_secondary_cpus() users are
	 * supposed to do thaw_secondary_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_thaw_secondary_cpus_begin(void)
{
}

void __weak arch_thaw_secondary_cpus_end(void)
{
}

void thaw_secondary_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_thaw_secondary_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_thaw_secondary_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]	= {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= finish_cpu,
		.cant_stop		= true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},

	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
		.name			= "sched:waitempty",
		.startup.single		= NULL,
		.teardown.single	= sched_cpu_wait_empty,
	},

	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name			= "lockup_detector:online",
		.startup.single		= lockup_detector_online_cpu,
		.teardown.single	= lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment in the Online range. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
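
/*
 * Multi-instance usage sketch (illustrative, not part of this file): a
 * driver embeds a struct hlist_node per object, registers the state once
 * via the cpuhp_setup_state_multi() wrapper from <linux/cpuhotplug.h>, and
 * then adds one instance per object. All names except the cpuhp_* API are
 * hypothetical.
 */
#if 0
struct demo_dev {
	struct hlist_node node;
	/* ... per-device data ... */
};

static enum cpuhp_state demo_state;

static int demo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct demo_dev *d = hlist_entry(node, struct demo_dev, node);

	/* ... bring @d's per-cpu resources up on @cpu ... */
	return 0;
}

static int demo_init(struct demo_dev *d)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
				      demo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	demo_state = ret;
	/* Invokes demo_cpu_online() for @d on every CPU already online */
	return cpuhp_state_add_instance(demo_state, &d->node);
}
#endif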
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
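
/*
 * Usage sketch (illustrative, not part of this file): most callers use the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>. Requesting
 * CPUHP_AP_ONLINE_DYN returns the dynamically reserved state number, which
 * is later passed to cpuhp_remove_state(). The callback and module names
 * below are hypothetical.
 */
#if 0
static enum cpuhp_state demo_online_state;

static int demo_online(unsigned int cpu)
{
	/* ... allocate/enable per-cpu resources for @cpu ... */
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	/* ... tear the per-cpu resources down again ... */
	return 0;
}

static int __init demo_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;
	demo_online_state = ret;	/* remember the dynamic state */
	return 0;
}

static void __exit demo_module_exit(void)
{
	/* Invokes demo_offline() on all online CPUs, then frees the slot */
	cpuhp_remove_state(demo_online_state);
}
#endif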
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = cpu_up(dev->id, target);
	else
		ret = cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};
#ifdef CONFIG_HOTPLUG_SMT

static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
	NULL
};

static int __init cpu_smt_sysfs_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
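
/*
 * Illustration (not part of this file): cpumask_of(cpu) resolves to a
 * pointer into cpu_bit_bitmap. Row 0 is all zeroes; row n+1 has only its
 * first word populated with 1UL << n, and the row pointer is then shifted
 * back by cpu / BITS_PER_LONG words so the single set bit lands in the
 * right word. A sketch of the equivalent lookup, mirroring get_cpu_mask()
 * in <linux/cpumask.h>:
 */
#if 0
static const struct cpumask *demo_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif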
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note, that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
);