/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }
#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
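
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * takes the hotplug lock for reading around any walk of the online CPUs so
 * the set cannot change underneath it. The helper name below is made up
 * for this example.
 */
#if 0
static unsigned int count_online_cpus_example(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();		/* block concurrent CPU hotplug */
	for_each_online_cpu(cpu)	/* stable while the read lock is held */
		cnt++;
	cpus_read_unlock();

	return cnt;
}
#endif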
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
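
/*
 * Illustrative sketch (not part of the original file): cpu_hotplug_disable()
 * and cpu_hotplug_enable() nest by way of the cpu_hotplug_disabled counter,
 * so callers that must keep the CPU topology fixed for a while simply
 * bracket the critical section with the pair. The function name below is
 * made up.
 */
#if 0
static void run_with_hotplug_disabled_example(void (*fn)(void))
{
	cpu_hotplug_disable();	/* cpu_up()/cpu_down() now return -EBUSY */
	fn();
	cpu_hotplug_enable();	/* re-allow hotplug once we are done */
}
#endif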
#endif	/* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}
/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of non
 * boot CPUs when a SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}
static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
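
/*
 * Illustrative note (not part of the original file): the early_param() hook
 * above means SMT can be disabled from the kernel command line, e.g. "nosmt"
 * to disable it, or "nosmt=force" to force-disable it so it cannot be
 * re-enabled via sysfs later.
 */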
static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->state < prev_state)
			st->state++;
		else
			st->state--;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in cpu_notify_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);

		if (step->skip_onerr) {
			st->should_run = false;
			goto next;
		}
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
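
/*
 * Illustrative sketch (not part of the original file): kernel code that
 * needs to take a CPU offline and bring it back can pair cpu_down() and
 * cpu_up(); both take cpu_add_remove_lock internally. The function name
 * below is made up for this example.
 */
#if 0
static int cycle_cpu_example(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);		/* drive the CPU to CPUHP_OFFLINE */
	if (ret)
		return ret;
	return cpu_up(cpu);		/* and back to CPUHP_ONLINE */
}
#endif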
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error) {
			cpumask_set_cpu(cpu, frozen_cpus);
		} else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]= {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
	/*
	 * The dynamically registered state space is here
	 */

	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
/*
 * Returns a free slot for dynamic state assignment in the Online range. The
 * states are protected by the cpuhp_slot_states mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
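
/*
 * Illustrative sketch (not part of the original file): a driver that
 * registered a multi-instance state typically embeds a struct hlist_node in
 * its per-device data and adds each device as an instance, usually through
 * the cpuhp_state_add_instance() wrapper from <linux/cpuhotplug.h>. The
 * structure and function names below are made up for this example.
 */
#if 0
struct example_dev {
	struct hlist_node node;
	/* ... driver specific per-device data ... */
};

static int example_register_dev(enum cpuhp_state state, struct example_dev *d)
{
	/* Runs the state's startup.multi callback on each CPU that already
	 * reached @state, then links the instance into the state's list. */
	return cpuhp_state_add_instance(state, &d->node);
}
#endif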
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
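
/*
 * Illustrative sketch (not part of the original file): a typical user goes
 * through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>, passing
 * CPUHP_AP_ONLINE_DYN to get a dynamically allocated state back. The
 * callback and function names below are made up for this example.
 */
#if 0
static int example_online(unsigned int cpu)
{
	/* set up per-cpu resources for @cpu */
	return 0;
}

static int example_offline(unsigned int cpu)
{
	/* tear down per-cpu resources for @cpu */
	return 0;
}

static int __init example_init(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				  example_online, example_offline);
	if (state < 0)
		return state;
	/* remember @state so it can later be removed with cpuhp_remove_state() */
	return 0;
}
#endif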
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}
static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
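
/*
 * Illustrative note (not part of the original file): the "fail" attribute
 * above is an error-injection knob. Writing a state number into it makes the
 * next transition through that state on this CPU fail (see the st->fail
 * check in cpuhp_invoke_callback()). Assuming the attributes are exposed
 * under the per-CPU hotplug directory, a test from a shell could look like:
 *
 *	echo <state-number> > /sys/devices/system/cpu/cpuN/hotplug/fail
 *	echo 0 > /sys/devices/system/cpu/cpuN/online
 *
 * where <state-number> is one of the entries listed by the "states"
 * attribute defined further below.
 */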
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
};
#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}
static int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
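
/*
 * Illustrative note (not part of the original file): cpu_smt_state_init()
 * below registers this attribute group on the cpu subsystem root, so the
 * control knob typically ends up as /sys/devices/system/cpu/smt/control
 * (the exact path depends on the group name used). From a shell:
 *
 *	echo off      > /sys/devices/system/cpu/smt/control
 *	echo forceoff > /sys/devices/system/cpu/smt/control
 *	echo on       > /sys/devices/system/cpu/smt/control
 *
 * "forceoff" is sticky: once set, store_smt_control() rejects further
 * changes with -EPERM.
 */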
static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
};
static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif /* CONFIG_HOTPLUG_SMT */
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
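
/*
 * Illustrative note (not part of the original file): only word 0 of each row
 * is initialized, to 1UL << nr, and row 0 is intentionally left empty. A
 * cpumask_of(cpu) implementation can therefore pick row (1 + cpu %
 * BITS_PER_LONG) and step the pointer back by cpu / BITS_PER_LONG words, so
 * that bit 'cpu' of the resulting mask is the single bit that is set. For
 * example, with 64-bit longs, cpu 70 uses row 7 stepped back by one word,
 * which places the set bit at position 70 of the mask while all words it
 * "backs into" are zero.
 */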
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->booted_once = true;
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}