/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

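/*
 * Illustrative sketch (not part of this file): a subsystem that wants to
 * initialize the CPUs that are already online and then register a hotplug
 * callback, without racing against concurrent hotplug, would bracket both
 * steps with cpu_notifier_register_begin()/done() and use the non-locking
 * __register_cpu_notifier() variant declared below.  The foo_* names are
 * hypothetical.
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *	cpu_notifier_register_done();
 */
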
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
	/* And allows lockless put_online_cpus(). */
	atomic_t puts_pending;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	if (!mutex_trylock(&cpu_hotplug.lock)) {
		atomic_inc(&cpu_hotplug.puts_pending);
		cpuhp_lock_release();
		return;
	}

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

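/*
 * Illustrative sketch (not from this file): a read-side section that needs a
 * stable view of cpu_online_mask brackets its work with the pair above; the
 * do_something_on() helper is hypothetical.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something_on(cpu);
 *	put_online_cpus();
 */
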
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (atomic_read(&cpu_hotplug.puts_pending)) {
			int delta;

			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
			cpu_hotplug.refcount -= delta;
		}
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

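/*
 * Illustrative sketch of a CPU notifier callback (hypothetical foo_* names,
 * not part of this file).  A callback receives the CPU id via hcpu and
 * reports failure with notifier_from_errno(), which the __cpu_notify()
 * machinery above converts back via notifier_to_errno():
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_prepare(cpu));
 *		case CPU_DEAD:
 *			foo_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */
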
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

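/*
 * Illustrative sketch (architecture code, not from this file): a typical
 * secondary-CPU startup path calls notify_cpu_starting() before marking
 * itself online and enabling interrupts, roughly:
 *
 *	notify_cpu_starting(cpu);
 *	set_cpu_online(cpu, true);
 *	local_irq_enable();
 */
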
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
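
/*
 * Sketch of how <linux/cpumask.h> indexes this table (shown here only for
 * illustration; the authoritative copy lives in that header): the mask for a
 * CPU is found by picking the row whose single set bit matches the CPU's bit
 * position and backing the pointer up by the CPU's word index, which is why
 * row 0 must stay empty.
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 */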

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}