/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
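
/*
 * Illustrative sketch (not part of the original file): the registration
 * pattern the cpu_notifier_register_begin/done() names are meant to protect.
 * In kernels of this era those names appear to be aliases for
 * cpu_maps_update_begin/done() (via include/linux/cpu.h), which is why the
 * EXPORT_SYMBOL() lines above use them. The foobar_* names below are
 * hypothetical:
 *
 *      cpu_notifier_register_begin();
 *
 *      for_each_online_cpu(cpu)
 *              foobar_init_cpu(cpu);
 *
 *      __register_cpu_notifier(&foobar_cpu_notifier);
 *
 *      cpu_notifier_register_done();
 *
 * The whole sequence runs under a single acquisition of cpu_add_remove_lock,
 * so no CPU can come or go between the per-CPU setup loop and the notifier
 * registration.
 */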

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);

        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */

        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
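
/*
 * Illustrative sketch (not part of the original file): a typical reader-side
 * user of the two functions above. do_something_per_cpu() is a hypothetical
 * helper:
 *
 *      get_online_cpus();
 *
 *      for_each_online_cpu(cpu)
 *              do_something_per_cpu(cpu);
 *
 *      put_online_cpus();
 *
 * Between get_online_cpus() and put_online_cpus() no CPU can be hotplugged,
 * so the set of online CPUs seen by the loop cannot change underneath it.
 */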

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        cpuhp_lock_acquire();
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
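
/*
 * Illustrative sketch (not part of the original file): a minimal client of
 * the notifier API above. The callback, the notifier_block and the pr_info()
 * messages are hypothetical:
 *
 *      static int example_cpu_callback(struct notifier_block *nfb,
 *                                      unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      pr_info("example: CPU%u came online\n", cpu);
 *                      break;
 *              case CPU_DEAD:
 *                      pr_info("example: CPU%u went offline\n", cpu);
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_cpu_notifier = {
 *              .notifier_call = example_cpu_callback,
 *      };
 *
 * register_cpu_notifier(&example_cpu_notifier) takes cpu_add_remove_lock
 * around the chain update; the __register_cpu_notifier() variant is for
 * callers already inside cpu_notifier_register_begin/done().
 */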

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with task_rq(p)->lock unlocked.
                 * Order the reads so that we do not warn about a task
                 * that was running on this cpu in the past and has
                 * just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
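
/*
 * Illustrative sketch (not part of the original file): cpu_up()/cpu_down()
 * are what the sysfs "online" attribute ultimately invokes; a caller simply
 * passes the logical CPU id and checks the error code. The error handling
 * below is hypothetical:
 *
 *      err = cpu_down(cpu);
 *      if (err)
 *              pr_err("failed to offline CPU%u: %d\n", cpu, err);
 *
 *      err = cpu_up(cpu);
 *      if (err)
 *              pr_err("failed to online CPU%u: %d\n", cpu, err);
 *
 * Both paths take cpu_maps_update_begin()/done() themselves and return
 * -EBUSY while cpu_hotplug_disabled is set.
 */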

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
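
/*
 * Illustrative sketch (not part of the original file): on the suspend and
 * hibernate paths the core PM code pairs the two functions above, roughly:
 *
 *      error = disable_nonboot_cpus();
 *      if (error)
 *              goto Enable_cpus;
 *      ...
 *      (enter the sleep state on the boot CPU)
 *      ...
 *   Enable_cpus:
 *      enable_nonboot_cpus();
 *
 * frozen_cpus remembers which CPUs were taken down, so resume only brings
 * those CPUs back up.
 */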

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86's
         * bsp_pm_callback, which relies on cpu_hotplug_pm_callback having
         * already disabled CPU hotplug, to avoid CPU hotplug races.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
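
/*
 * Illustrative sketch (not part of the original file): the ordering expected
 * from an arch secondary-CPU bring-up path, with names loosely modeled on an
 * arch implementation and therefore hypothetical here:
 *
 *      void secondary_start_kernel(void)
 *      {
 *              ...
 *              notify_cpu_starting(cpu);
 *              set_cpu_online(cpu, true);
 *              local_irq_enable();
 *              cpu_startup_entry(CPUHP_ONLINE);
 *      }
 *
 * i.e. the CPU_STARTING notifiers run on the new CPU with interrupts still
 * disabled, before the CPU is marked online.
 */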

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each of the NR_CPUS bit numbers nr, the single-bit
 * value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
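
/*
 * Illustrative sketch (not part of the original file): how cpumask_of() uses
 * this table, roughly as in include/linux/cpumask.h of this era:
 *
 *      static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *      {
 *              const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *              p -= cpu / BITS_PER_LONG;
 *              return to_cpumask(p);
 *      }
 *      #define cpumask_of(cpu) (get_cpu_mask(cpu))
 *
 * Row 1 + (cpu % BITS_PER_LONG) has the right bit set in its first word;
 * stepping back cpu / BITS_PER_LONG words (into the preceding rows, which is
 * why row 0 must stay empty) places that word at the correct word index for
 * the CPU number, with zero words around it.
 */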

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}