/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
};

static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
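
/*
 * Illustrative sketch, not part of the original file: what a single step
 * looks like from the point of view of a subsystem. foo_example_step,
 * foo_prepare_cpu() and foo_dead_cpu() are hypothetical; the real step
 * tables are cpuhp_bp_states[] and cpuhp_ap_states[] at the end of this
 * file.
 */
static int __maybe_unused foo_prepare_cpu(unsigned int cpu)
{
	/* Allocate per-cpu resources for @cpu, return 0 or -errno. */
	return 0;
}

static int __maybe_unused foo_dead_cpu(unsigned int cpu)
{
	/* Release the per-cpu resources of @cpu again. */
	return 0;
}

static struct cpuhp_step foo_example_step __maybe_unused = {
	.name		= "foo:prepare",
	.startup	= foo_prepare_cpu,
	.teardown	= foo_dead_cpu,
};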
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
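
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to pair cpu_notifier_register_begin()/done() (which the
 * EXPORT_SYMBOL lines above tie to cpu_maps_update_begin/done) around
 * __register_cpu_notifier(), as described in the comment above.
 * foo_register_notifier() is a hypothetical helper.
 */
static int __maybe_unused foo_register_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_notifier_register_begin();
	ret = __register_cpu_notifier(nb);
	cpu_notifier_register_done();

	return ret;
}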
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
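
/*
 * Illustrative sketch, not part of the original file: a typical reader
 * side section. Between get_online_cpus() and put_online_cpus() no CPU
 * can come or go, so walking cpu_online_mask is stable.
 * foo_count_online() is a hypothetical helper.
 */
static unsigned int __maybe_unused foo_count_online(void)
{
	unsigned int cpu, cnt = 0;

	get_online_cpus();
	for_each_online_cpu(cpu)
		cnt++;
	put_online_cpus();

	return cnt;
}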

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
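
/*
 * Illustrative sketch, not part of the original file: how callers such as
 * the suspend path (see cpu_hotplug_pm_callback() further down) pair the
 * two functions above around a region in which cpu_up()/cpu_down() must
 * fail with -EBUSY. foo_do_work_without_hotplug() is a hypothetical
 * placeholder.
 */
static void __maybe_unused foo_hotplug_disabled_region(void)
{
	cpu_hotplug_disable();
	/* cpu_up()/cpu_down() return -EBUSY from here on ... */
	/* foo_do_work_without_hotplug(); */
	cpu_hotplug_enable();
}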
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}
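
/*
 * Illustrative sketch, not part of the original file: what a classic
 * cpu_chain notifier sees. __cpu_notify() passes the event or'ed with
 * CPU_TASKS_FROZEN when cpuhp_tasks_frozen is set, and encodes the cpu
 * number in hcpu. foo_cpu_callback() is a hypothetical callback.
 */
static int __maybe_unused foo_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Prepare per-cpu resources for @cpu. */
		break;
	case CPU_DEAD:
		/* @cpu is gone, release its resources. */
		break;
	}
	return NOTIFY_OK;
}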

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do sync before park smpboot threads to take care of the rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_OFFLINE;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
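
/*
 * Illustrative sketch, not part of the original file: the ordering the
 * kerneldoc above asks from architecture code on the freshly booted CPU.
 * foo_secondary_start() stands in for an arch specific secondary entry
 * point; the exact calls and their ordering differ per architecture.
 */
static void __maybe_unused foo_secondary_start(unsigned int cpu)
{
	/* ... low level arch bringup of @cpu ... */
	notify_cpu_starting(cpu);	/* before interrupts are enabled */
	set_cpu_online(cpu, true);	/* lets __cpu_up() on the boot cpu return */
	local_irq_enable();
	/* ... enter the idle loop ... */
}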

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/* Let it fail before we try to bring the cpu up */
	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_ONLINE;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
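
/*
 * Illustrative sketch, not part of the original file: how the suspend core
 * is expected to pair the two functions above. disable_nonboot_cpus()
 * leaves only the boot CPU online and records the others in frozen_cpus;
 * enable_nonboot_cpus() brings exactly those CPUs back, and per the comment
 * in disable_nonboot_cpus() it must be called even on the failure path.
 * foo_suspend_enter() is a hypothetical caller.
 */
static int __maybe_unused foo_suspend_enter(void)
{
	int error;

	error = disable_nonboot_cpus();
	if (!error) {
		/* ... enter the sleep state with a single CPU online ... */
	}
	enable_nonboot_cpus();
	return error;
}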

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_CREATE_THREADS] = {
		.name		= "threads:create",
		.startup	= smpboot_create_threads,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name		= "notify:prepare",
		.startup	= notify_prepare,
		.teardown	= notify_dead,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name		= "cpu:bringup",
		.startup	= bringup_cpu,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name		= "cpu:teardown",
		.teardown	= takedown_cpu,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name		= "notify:online",
		.startup	= notify_online,
		.teardown	= notify_down_prepare,
	},
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name		= "notify:starting",
		.startup	= notify_starting,
		.teardown	= notify_dying,
	},
};

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
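
/*
 * Illustrative sketch, not part of the original file: how cpumask_of()
 * (via get_cpu_mask() in <linux/cpumask.h>) resolves a cpu number inside
 * cpu_bit_bitmap[]. Row 1 + cpu % BITS_PER_LONG contains a single long
 * with bit cpu % BITS_PER_LONG set; stepping the pointer back by
 * cpu / BITS_PER_LONG longs makes that bit appear at position @cpu of the
 * resulting mask. foo_cpumask_of() merely restates that lookup.
 */
static __maybe_unused const struct cpumask *foo_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}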

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}