kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
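
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a typical reader-side caller brackets its walk of the online mask
 * with get_online_cpus()/put_online_cpus() so no CPU can be unplugged
 * mid-iteration. The function name and the per-cpu counter are hypothetical.
 */
#if 0	/* example only */
static unsigned long example_sum_counters(unsigned long __percpu *counter)
{
	unsigned long sum = 0;
	int cpu;

	get_online_cpus();		/* hold off hotplug writers */
	for_each_online_cpu(cpu)
		sum += *per_cpu_ptr(counter, cpu);
	put_online_cpus();		/* drop the reader reference */
	return sum;
}
#endif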

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
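
/*
 * Illustrative sketch (editor's addition): a caller that updates
 * cpu_present_mask, e.g. arch hot-add code, must bracket the change with
 * the map-update lock. The function name here is hypothetical.
 */
#if 0	/* example only */
static void example_mark_cpu_present(unsigned int cpu)
{
	cpu_maps_update_begin();	/* serialize against cpu_up()/cpu_down() */
	set_cpu_present(cpu, true);
	cpu_maps_update_done();
}
#endif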

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
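
/*
 * Illustrative sketch (editor's addition): a subsystem that keeps per-cpu
 * state typically registers a notifier and switches on the action with
 * CPU_TASKS_FROZEN masked off, so the suspend/resume variants are handled
 * by the same cases. All names below are hypothetical.
 */
#if 0	/* example only */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state for @cpu; may veto with NOTIFY_BAD */
		break;
	case CPU_ONLINE:
		/* @cpu is running; start using it */
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* @cpu is gone; free its state */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_nb);
	return 0;
}
#endif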

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	struct task_struct *caller;
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	err = stop_machine_create();
	if (err)
		return err;
	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	stop_machine_destroy();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif	/* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	set_cpu_active(cpu, true);

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
					  CPU_UP_CANCELED | mod, hcpu,
					  nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
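
/*
 * Illustrative sketch (editor's addition): cpu_down()/cpu_up() are the
 * entry points behind /sys/devices/system/cpu/cpuN/online; kernel code can
 * also call them directly from a sleepable context. Error handling is
 * abbreviated.
 */
#if 0	/* example only */
	int err;

	err = cpu_down(1);		/* offline CPU 1 */
	if (!err)
		err = cpu_up(1);	/* ...and bring it back */
#endif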

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error;

	error = stop_machine_create();
	if (error)
		return error;
	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	stop_machine_destroy();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
#endif	/* CONFIG_PM_SLEEP_SMP */
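
/*
 * Illustrative sketch (editor's addition): the suspend core uses the pair
 * above roughly as follows, leaving only the boot CPU online across the
 * sleep transition. Simplified; the real ordering lives in kernel/power/.
 */
#if 0	/* example only */
	error = disable_nonboot_cpus();	/* offline all but the boot CPU */
	if (!error) {
		/* ... enter the sleep state on the one remaining CPU ... */
		enable_nonboot_cpus();	/* re-online the frozen_cpus set */
	}
#endif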

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif	/* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
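
/*
 * Illustrative sketch (editor's addition): schematic ordering in an arch's
 * secondary-CPU startup path, honouring the constraint documented above
 * (interrupts still off, boot CPU still waiting in __cpu_up()). Details
 * vary per architecture.
 */
#if 0	/* example only */
	/* ... low-level init of the new CPU, interrupts disabled ... */
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);	/* lets the boot CPU's __cpu_up() return */
	local_irq_enable();
	/* ... fall into the idle loop ... */
#endif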

#endif	/* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
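
/*
 * Editor's note: a sketch of how cpumask_of() exploits this table, matching
 * the get_cpu_mask() helper in <linux/cpumask.h> of this vintage. Row
 * 1 + cpu % BITS_PER_LONG holds 1UL << (cpu % BITS_PER_LONG) in word 0;
 * stepping the pointer back by cpu / BITS_PER_LONG words lands that bit in
 * the right word of the mask, and the words walked back over are the
 * always-zero trailing words of the preceding row (row 0 is kept all-zero
 * precisely so row 1 has something to back into).
 */
#if 0	/* example only */
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif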

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}