/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

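/*
 * A minimal reader-side sketch (the loop body is illustrative, not taken
 * from this file): any code that must see a stable cpu_online_mask
 * brackets its critical section with the get/put pair above.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		...			-- no CPU can come or go here
 *	put_online_cpus();
 */
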
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

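/*
 * Writer-side pairing, as used by _cpu_down()/_cpu_up() below: the
 * map-update mutex is taken first, then the hotplug "write lock".
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	...				-- perform the hotplug operation
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */
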
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
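
/*
 * cpu_hotplug_disabled is a counter, so calls to the two functions above
 * must be balanced. The intended pairing (this is exactly what
 * cpu_hotplug_pm_callback() further down does across suspend/hibernate):
 *
 *	cpu_hotplug_disable();
 *	...			-- cpu_up()/cpu_down() now fail with -EBUSY
 *	cpu_hotplug_enable();
 */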
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

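/*
 * Note: cpu_notifier_register_begin/done() are, via #defines in
 * <linux/cpu.h>, aliases for cpu_maps_update_begin/done() -- which is why
 * those names appear in the EXPORT_SYMBOL() lines near the top of this
 * file. A sketch of the lockless registration protocol, with a
 * hypothetical notifier block my_cpu_notifier (smpboot_thread_notifier
 * further down is a real in-file example):
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		...			-- set up per-cpu state
 *	__register_cpu_notifier(&my_cpu_notifier);
 *	cpu_notifier_register_done();
 */
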
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

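/*
 * The "mod" bit folded into the event above lets a single callback tell
 * regular hotplug from freezer-driven hotplug: during resume, for
 * example, the chain is invoked with CPU_ONLINE | CPU_TASKS_FROZEN
 * (i.e. CPU_ONLINE_FROZEN) instead of plain CPU_ONLINE.
 */
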
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We check task_cpu() with task_rq(p)->lock unlocked. Order
		 * the read so that we do not warn about a task which ran on
		 * this cpu in the past and has just been woken up on another
		 * cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

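/*
 * take_cpu_down() below runs on the CPU that is being removed, in
 * stop_machine() context with interrupts disabled -- see the
 * stop_machine(take_cpu_down, NULL, cpumask_of(cpu)) call in _cpu_down()
 * -- so everything it does must be brief and non-blocking.
 */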
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING, cpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu; there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD, cpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
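
/*
 * A sketch of the typical round trip as driven from outside this file
 * (roughly what the sysfs online attribute in drivers/base/cpu.c does;
 * error handling omitted):
 *
 *	err = cpu_down(3);		-- take CPU 3 offline
 *	...
 *	err = cpu_up(3);		-- and bring it back
 */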
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct task_struct *idle;
	int ret;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	cpuhp_tasks_frozen = tasks_frozen;

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = notify_prepare(cpu);
	if (ret)
		goto out;

	ret = bringup_cpu(cpu);
	if (ret)
		goto out;

	notify_online(cpu);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

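/*
 * A sketch of how the suspend core is expected to use the pair above
 * (compare kernel/power/suspend.c; error handling trimmed). Note that
 * enable_nonboot_cpus() must also be called on the failure path, as the
 * comment in disable_nonboot_cpus() explains:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error)
 *		...		-- enter the sleep state with one CPU online
 *	enable_nonboot_cpus();
 */
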
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
}

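/*
 * A sketch of the expected call site, per the comment above -- arch
 * secondary-startup code (names vary per architecture; this entry point
 * is hypothetical) does roughly:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();	-- only after the notifiers ran
 *		...
 *	}
 */
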
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr < NR_CPUS, the NR_CPUS-bit binary
 * value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
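
/*
 * For reference, cpumask_of() consumes this table via get_cpu_mask() in
 * <linux/cpumask.h>, which (modulo casts) does roughly:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * It picks the row whose first word is 1UL << (cpu % BITS_PER_LONG) and
 * backs the pointer up so that the set bit lands in word
 * cpu / BITS_PER_LONG -- which is why row 0 must stay empty: it is what
 * gets backed into.
 */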

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}