/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/*
				 * Synchronizes accesses to refcount.
				 * Also blocks new readers during an
				 * ongoing cpu hotplug operation.
				 */
	int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

#endif /* CONFIG_HOTPLUG_CPU */
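
/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical driver that needs the set of online CPUs to stay stable
 * while it walks them. The example_* name is made up; the get/put pair
 * above is the real interface.
 */
#if 0
static void example_walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();	/* hold off cpu_up()/cpu_down() writers */
	for_each_online_cpu(cpu) {
		/* 'cpu' cannot be unplugged while the refcount is held */
		printk(KERN_INFO "cpu %d is online\n", cpu);
	}
	put_online_cpus();
}
#endif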

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
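
/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * client of register_cpu_notifier(). The example_* names are made up;
 * the notifier_block layout, the CPU_* actions and the NOTIFY_* return
 * values are the real interface (see <linux/cpu.h> and
 * <linux/notifier.h>).
 */
#if 0
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* set up per-cpu state; returning NOTIFY_BAD aborts bring-up */
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu %u is now online\n", cpu);
		break;
	case CPU_DEAD:
		/* tear down per-cpu state */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
	.notifier_call = example_cpu_callback,
};

/* at init time: register_cpu_notifier(&example_cpu_notifier); */
#endif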

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	/*
	 * Force idle task to run as soon as we yield: it should
	 * immediately notice cpu is offline and die quickly.
	 */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	cpumask_var_t old_allowed;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying cpu */
	cpumask_copy(old_allowed, &current->cpus_allowed);
	set_cpus_allowed_ptr(current,
			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		goto out_allowed;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_allowed:
	set_cpus_allowed_ptr(current, old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	free_cpumask_var(old_allowed);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	err = stop_machine_create();
	if (err)
		return err;
	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	set_cpu_active(cpu, false);

	/*
	 * Make sure all cpus have rescheduled and are no longer
	 * using a stale version of the cpu_active_mask.
	 * This is not strictly necessary because the stop_machine()
	 * that we run down the line already provides the required
	 * synchronization. But it's really a side effect and we do not
	 * want to depend on the innards of the stop_machine here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		set_cpu_active(cpu, true);

out:
	cpu_maps_update_done();
	stop_machine_destroy();
	return err;
}
EXPORT_SYMBOL(cpu_down);
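
/*
 * Illustrative sketch (not part of the original file): cpu_down() is
 * what ultimately runs when userspace writes 0 to
 * /sys/devices/system/cpu/cpuN/online (see drivers/base/cpu.c); the
 * example_* wrapper below is made up.
 */
#if 0
static int example_offline_cpu(unsigned int cpu)
{
	int err = cpu_down(cpu);	/* -EBUSY, -EINVAL, ... on failure */

	if (err)
		printk(KERN_ERR "failed to offline cpu %u: %d\n", cpu, err);
	return err;
}
#endif
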
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	set_cpu_active(cpu, true);

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
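
/*
 * Illustrative sketch (not part of the original file): the inverse of
 * the cpu_down() example above, e.g. what runs when userspace writes 1
 * to /sys/devices/system/cpu/cpuN/online. The example_* name is made up.
 */
#if 0
static int example_online_cpu(unsigned int cpu)
{
	return cpu_up(cpu);	/* -EINVAL if !cpu_possible(cpu), etc. */
}
#endif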

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error;

	error = stop_machine_create();
	if (error)
		return error;
	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);
	printk(KERN_INFO "Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpumask_set_cpu(cpu, frozen_cpus);
			printk(KERN_INFO "CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
			       cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	stop_machine_destroy();
	return error;
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");
	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
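
/*
 * Illustrative sketch (not part of the original file): the shape of the
 * suspend-time caller of the two helpers above, loosely modelled on the
 * PM core in kernel/power/. Note the contract: disable_nonboot_cpus()
 * sets cpu_hotplug_disabled on success, so nothing can re-online a CPU
 * until enable_nonboot_cpus() clears it again.
 */
#if 0
static int example_enter_sleep_state(void)
{
	int error = disable_nonboot_cpus();

	if (error)
		return error;
	/* ... only the boot CPU runs here; enter the sleep state ... */
	enable_nonboot_cpus();
	return 0;
}
#endif
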
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
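
/*
 * Illustrative sketch (not part of the original file): where an
 * architecture is expected to call notify_cpu_starting() from its
 * secondary-CPU bring-up path, per the kernel-doc above. This is not
 * any particular arch's code.
 */
#if 0
static void __cpuinit example_start_secondary(void)
{
	/* low-level per-cpu setup, interrupts still disabled ... */
	notify_cpu_starting(smp_processor_id());
	/* ... then the arch marks the cpu online and enables interrupts */
}
#endif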

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr in 0..NR_CPUS-1, the NR_CPUS-bit value
 * 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
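
/*
 * For reference, roughly how cpumask_of() backs into the table above
 * (see get_cpu_mask() in include/linux/cpumask.h): row 1 + cpu %
 * BITS_PER_LONG holds the single set bit in its first word, and
 * stepping the pointer back by cpu / BITS_PER_LONG longs moves that
 * word to position cpu / BITS_PER_LONG of the returned mask. Every
 * word the pointer backs over reads as zero, because only column 0 of
 * each row is ever non-zero.
 */
#if 0
static inline const struct cpumask *example_get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif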

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}