kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map can grow dynamically as new
 * CPUs are detected via a platform-specific method such as ACPI.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);
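
/*
 * Example (illustrative sketch, not part of the original file): callers
 * normally consult the map through helpers such as cpu_present() and
 * for_each_present_cpu() rather than testing cpu_present_map directly.
 * Compiled out here; it only documents the calling convention.
 */
#if 0
static int example_count_present_cpus(void)
{
        int cpu, n = 0;

        for_each_present_cpu(cpu)       /* iterates cpu_present_map */
                n++;
        return n;
}
#endif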

#ifndef CONFIG_SMP

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

#else /* CONFIG_SMP */

/* Serializes updates to cpu_online_map and cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up() and cpu_down() return -EBUSY and do nothing.
 * Must always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
        struct task_struct *active_writer;
        struct mutex lock;      /* Synchronizes accesses to refcount. */
        /*
         * Also blocks new readers during an ongoing cpu hotplug
         * operation.
         */
        int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_init(&cpu_hotplug.lock);
        cpu_hotplug.refcount = 0;
}

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
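
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * reader brackets its use of the online map with get_online_cpus() and
 * put_online_cpus(), so no CPU can go offline in between.  Compiled out
 * here; it only documents the calling convention.
 */
#if 0
static void example_online_walk(void)
{
        int cpu;

        get_online_cpus();              /* blocks a concurrent hotplug writer */
        for_each_online_cpu(cpu)
                printk(KERN_INFO "CPU%d is online\n", cpu);
        put_online_cpus();              /* drops the reader reference */
}
#endif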

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used to serialize updates to
 * cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, any new readers will be
 * blocked by cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that, in theory, a livelock is possible:
 * - Refcount goes to zero; the last reader wakes up the sleeping writer.
 * - The last reader unlocks cpu_hotplug.lock.
 * - A new reader arrives at this moment and bumps up the refcount.
 * - The writer acquires cpu_hotplug.lock, finds the refcount non-zero,
 *   and goes to sleep again.
 *
 * However, this is very hard to hit in practice, since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
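
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * hotplug notifier.  The callback receives one of the CPU_* actions
 * (possibly with CPU_TASKS_FROZEN or'ed in) and the CPU number packed into
 * hcpu; it would be registered with
 * register_cpu_notifier(&example_cpu_notifier).  Compiled out here.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                printk(KERN_INFO "example: CPU%u came online\n", cpu);
                break;
        case CPU_DEAD:
                printk(KERN_INFO "example: CPU%u went down\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};
#endif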

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);
        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force the idle task to run as soon as we yield: it should
           immediately notice that the cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
                nr_calls--;
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }

        /* Ensure that we are not runnable on the dying cpu */
        old_allowed = current->cpus_allowed;
        cpus_setall(tmp);
        cpu_clear(cpu, tmp);
        set_cpus_allowed_ptr(current, &tmp);

        p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

        if (IS_ERR(p) || cpu_online(cpu)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();

                if (IS_ERR(p)) {
                        err = PTR_ERR(p);
                        goto out_allowed;
                }
                goto out_thread;
        }

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
                                    hcpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed_ptr(current, &old_allowed);
out_release:
        cpu_hotplug_done();
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err = 0;

        cpu_maps_update_begin();
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_down(cpu, 0);

        cpu_maps_update_done();
        return err;
}
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now tell the notifiers that the cpu is online. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_isset(cpu, cpu_possible_map)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
                printk(KERN_ERR "please check additional_cpus= boot "
                        "parameter\n");
#endif
                return -EINVAL;
        }

        cpu_maps_update_begin();
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_up(cpu, 0);

        cpu_maps_update_done();
        return err;
}
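
/*
 * Example (illustrative sketch, not part of the original file): cycling a
 * CPU from kernel code.  Both calls take cpu_add_remove_lock internally,
 * so the caller needs no extra locking; cpu_down() additionally requires
 * CONFIG_HOTPLUG_CPU.  Compiled out here.
 */
#if 0
static int example_cycle_cpu(unsigned int cpu)
{
        int err;

        err = cpu_down(cpu);    /* may fail with -EBUSY or -EINVAL */
        if (err)
                return err;
        return cpu_up(cpu);
}
#endif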

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = first_cpu(cpu_online_map);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpus_clear(frozen_cpus);
        printk(KERN_INFO "Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
                        cpu_set(cpu, frozen_cpus);
                        printk(KERN_INFO "CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpus_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");
        for_each_cpu_mask(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
        cpus_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
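
/*
 * Example (illustrative sketch, not part of the original file): a suspend
 * path pairs the two calls, taking the non-boot CPUs down before entering
 * the low-power state and bringing them back afterwards.  Compiled out
 * here.
 */
#if 0
static int example_suspend_cpus(void)
{
        int error;

        error = disable_nonboot_cpus();  /* also sets cpu_hotplug_disabled */
        if (error)
                return error;
        /* ... enter the sleep state here ... */
        enable_nonboot_cpus();           /* re-enables hotplug too */
        return 0;
}
#endif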
#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */