/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
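
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * updates cpu_present_mask, e.g. arch hot-add code, is expected to wrap
 * the update in cpu_maps_update_begin()/cpu_maps_update_done(). The
 * function name arch_register_new_cpu() is hypothetical.
 */
#if 0	/* usage sketch */
static int arch_register_new_cpu(unsigned int cpu)
{
	cpu_maps_update_begin();
	set_cpu_present(cpu, true);	/* update is now serialized */
	cpu_maps_update_done();
	return 0;
}
#endif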
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
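
/*
 * Example (illustrative sketch, not part of this file): code that needs a
 * stable cpu_online_mask brackets the section with cpus_read_lock()/
 * cpus_read_unlock(); hotplug writers are excluded for the duration.
 */
#if 0	/* usage sketch */
static void count_online_cpus_example(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)	/* mask cannot change under us */
		cnt++;
	cpus_read_unlock();
	pr_info("%u CPUs online\n", cnt);
}
#endif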
void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
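
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * cannot tolerate CPUs coming or going, e.g. during a firmware update,
 * may bracket the critical section with cpu_hotplug_disable()/enable().
 * do_firmware_update() is a hypothetical helper.
 */
#if 0	/* usage sketch */
static int firmware_update_example(void)
{
	int ret;

	cpu_hotplug_disable();		/* cpu_up()/cpu_down() now -EBUSY */
	ret = do_firmware_update();
	cpu_hotplug_enable();		/* must balance the disable */
	return ret;
}
#endif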
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	wait_for_completion(&st->done);
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	ret = bringup_wait_for_ap(cpu);
	BUG_ON(!cpu_online(cpu));
	return ret;
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define takedown_cpu		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up
 * the rest.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
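
/*
 * Example (illustrative sketch, not part of this file): kernel code such
 * as a test module can take a CPU through a full offline/online cycle
 * with cpu_down()/cpu_up(). Error handling kept minimal.
 */
#if 0	/* usage sketch */
static int cycle_cpu_example(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);		/* walk down to CPUHP_OFFLINE */
	if (ret)
		return ret;
	return cpu_up(cpu);		/* walk back up to CPUHP_ONLINE */
}
#endif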
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */
#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif
	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
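
/*
 * Example (illustrative sketch, not part of this file): a driver with one
 * object per device can use the multi-instance flavour. The state is set
 * up once with cpuhp_setup_state_multi(), then each device adds its
 * hlist_node. struct mydrv_dev, mydrv_cpu_online() and mydrv_cpu_dead()
 * are hypothetical.
 */
#if 0	/* usage sketch */
static enum cpuhp_state mydrv_hp_state;

struct mydrv_dev {
	struct hlist_node node;	/* handed to the cpuhp core */
};

static int mydrv_register(struct mydrv_dev *dev)
{
	/* Invokes mydrv_cpu_online() for this instance on all online CPUs */
	return cpuhp_state_add_instance(mydrv_hp_state, &dev->node);
}

static int __init mydrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				      mydrv_cpu_online, mydrv_cpu_dead);
	if (ret < 0)
		return ret;
	mydrv_hp_state = ret;	/* dynamically reserved state */
	return 0;
}
#endif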
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
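
/*
 * Example (illustrative sketch, not part of this file): the usual way to
 * consume this API is the cpuhp_setup_state() wrapper with a dynamic
 * state. mydrv_cpu_online()/mydrv_cpu_offline() are hypothetical per-CPU
 * callbacks; the returned state number is kept for later removal.
 */
#if 0	/* usage sketch */
static enum cpuhp_state mydrv_online_state;

static int mydrv_cpu_online(unsigned int cpu)
{
	pr_info("mydrv: CPU%u came online\n", cpu);
	return 0;		/* non-zero would roll the bringup back */
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	pr_info("mydrv: CPU%u going down\n", cpu);
	return 0;		/* teardown must not fail */
}

static int __init mydrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_online_state = ret;	/* dynamic slot that was reserved */
	return 0;
}

static void __exit mydrv_exit(void)
{
	cpuhp_remove_state(mydrv_online_state);
}
#endif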
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
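
/*
 * Example (illustrative, not part of this file): with the groups above,
 * each CPU exposes its hotplug state machine under sysfs. Assuming the
 * attribute group name is "hotplug", a shell session might look like:
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target   # offline cpu1
 */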
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
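
/*
 * Example (illustrative sketch, not part of this file): cpumask_of(cpu)
 * resolves to a pointer into cpu_bit_bitmap[] whose only set bit is
 * 'cpu', so callers get a constant single-CPU mask without allocating.
 */
#if 0	/* usage sketch */
static void pin_task_example(unsigned int cpu, struct task_struct *tsk)
{
	/* Affine a task to exactly one CPU using the constant mask */
	set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}
#endif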
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}