/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 *
 * cpu_notifier_register_begin/done() are #define aliases of
 * cpu_maps_update_begin/done() (see <linux/cpu.h>), which is why the
 * EXPORT_SYMBOL()s below use those names.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
        /* And allows lockless put_online_cpus(). */
        atomic_t puts_pending;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
        lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

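/*
 * Fold the puts that put_online_cpus() recorded locklessly in
 * puts_pending back into refcount, once at least @max of them have
 * accumulated. Must be called with cpu_hotplug.lock held.
 */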
static void apply_puts_pending(int max)
{
        int delta;

        if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
                delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
                cpu_hotplug.refcount -= delta;
        }
}

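/*
 * Take a reader reference against CPU hotplug; paired with
 * put_online_cpus(). May sleep while a hotplug operation is in
 * progress. The active writer (the task between cpu_hotplug_begin()
 * and cpu_hotplug_done()) may call this freely without deadlocking
 * against itself.
 */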
void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        apply_puts_pending(65536);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

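/*
 * Non-blocking variant of get_online_cpus(). Returns false instead of
 * sleeping when a hotplug operation holds cpu_hotplug.lock.
 */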
bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
        apply_puts_pending(65536);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

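/*
 * Drop a reader reference taken by get_online_cpus(). When the lock
 * cannot be taken without blocking, the put is recorded in puts_pending
 * and folded into refcount later by apply_puts_pending().
 */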
void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        if (!mutex_trylock(&cpu_hotplug.lock)) {
                atomic_inc(&cpu_hotplug.puts_pending);
                cpuhp_lock_release();
                return;
        }

        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */

        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        cpuhp_lock_acquire();
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                apply_puts_pending(1);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

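/* Release the write side taken by cpu_hotplug_begin() and let readers in. */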
void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

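/* Re-enable CPU hotplug after cpu_hotplug_disable(). */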
void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

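/*
 * A minimal usage sketch (hypothetical; my_cpu_callback and my_cpu_nb are
 * illustrative names, not part of this file): a subsystem that needs to
 * react to CPUs coming and going registers a callback on the chain above.
 *
 *      static int my_cpu_callback(struct notifier_block *nb,
 *                                 unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      ... set up per-cpu state for @cpu ...
 *                      break;
 *              case CPU_DEAD:
 *                      ... tear down per-cpu state for @cpu ...
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_cpu_nb = {
 *              .notifier_call = my_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&my_cpu_nb);
 */

/*
 * Lockless variant of register_cpu_notifier(): the caller must already
 * hold the hotplug lock via cpu_notifier_register_begin/done().
 */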
int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

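/*
 * Run the hotplug notifier chain. @nr_to_call limits how many callbacks
 * are invoked (-1 means all); if @nr_calls is non-NULL it returns the
 * number actually invoked, so a failed transition can be rolled back.
 */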
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with task_rq(p)->lock unlocked.
                 * Order the reads so that we do not warn about a
                 * task which was running on this cpu in the past
                 * but has just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU-boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

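/* Take a CPU offline; fails with -EBUSY while hotplug is disabled. */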
int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per-cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now tell everyone that the CPU is online. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

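/* Bring a CPU online; fails with -EBUSY while hotplug is disabled. */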
int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

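/*
 * Take every CPU except the first online (boot) CPU down for
 * suspend/hibernation, recording them in frozen_cpus so that
 * enable_nonboot_cpus() can bring exactly those CPUs back on resume.
 */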
int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error) {
                        cpumask_set_cpu(cpu, frozen_cpus);
                } else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

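/* Bring the CPUs recorded in frozen_cpus back online after resume. */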
void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

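/*
 * Accessors used by the architecture and scheduler code to update the
 * global CPU masks. Note that set_cpu_online(cpu, true) also marks the
 * CPU active; clearing the active bit on the way down is done separately
 * via set_cpu_active().
 */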
void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}