/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask and cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

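/*
 * Reader-side usage sketch (illustrative, not part of this file): hold
 * get_online_cpus() across any walk of cpu_online_mask so that no CPU
 * can be unplugged mid-iteration; do_work_on() is a made-up helper.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_work_on(cpu);
 *	put_online_cpus();
 */
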
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

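/*
 * Writer-side ordering sketch (illustrative): per the comment above,
 * cpu_hotplug_begin() is always nested inside cpu_maps_update_begin(),
 * which is the order cpu_down()/_cpu_down() and cpu_up()/_cpu_up()
 * below follow.
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... update cpu_online_mask / cpu_present_mask ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */
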
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

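/*
 * Client-side sketch (illustrative; the callback and its body are made
 * up): a notifier registered here receives CPU_* actions, possibly OR'd
 * with CPU_TASKS_FROZEN during suspend/resume.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu %u came up\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu %u went down\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&example_cpu_notifier);
 */
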
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

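/*
 * Call-site sketch (illustrative, from outside this file): several
 * architectures invoke this from their __cpu_disable() path once the
 * dying CPU has been marked offline, roughly:
 *
 *	set_cpu_online(cpu, false);
 *	...
 *	clear_tasks_mm_cpumask(cpu);
 */
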
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

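/*
 * Usage note (illustrative): cpu_up() and cpu_down() are the entry points
 * behind the sysfs "online" attribute, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	->  cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	->  cpu_up(1)
 */
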
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86's
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having disabled cpu hotplug, to avoid a hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure: for each
 * bit number nr it represents the NR_CPUS-bit binary value of 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
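
/*
 * Worked example (a sketch of get_cpu_mask() in include/linux/cpumask.h):
 * on a 64-bit kernel, cpumask_of(5) resolves to &cpu_bit_bitmap[1 + 5][0],
 * whose first long is 1UL << 5. For cpu 70 with NR_CPUS > 64, row
 * [1 + 70 % 64] = [7] is taken and the pointer backed up by 70 / 64 = 1
 * long, so bit 70 of the resulting mask is set; the empty row [0] is what
 * keeps such backed-up pointers inside the array.
 */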

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}