/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

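/*
 * Illustrative usage sketch (not part of this file, compiled out): a
 * hypothetical caller bracketing a walk over the online CPUs with the
 * reader-side API above, so no CPU can be hot-unplugged while the loop
 * runs.  The function name is an assumption for illustration only.
 */
#if 0
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();		/* block hotplug writers */
	for_each_online_cpu(cpu)	/* cpu_online_mask is stable here */
		n++;
	put_online_cpus();		/* let hotplug proceed again */

	return n;
}
#endif
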
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

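/*
 * Illustrative usage sketch (not part of this file, compiled out): a
 * hypothetical subsystem reacting to CPUs coming and going via the
 * notifier chain registered above.  The callback and notifier names,
 * and the work done in each case, are assumptions for illustration only.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* set up per-cpu state for 'cpu' here */
		break;
	case CPU_DEAD:
		/* tear down per-cpu state for 'cpu' here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

/* registered from the subsystem's init path with:
 *	register_cpu_notifier(&example_cpu_notifier);
 */
#endif
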
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

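/*
 * Illustrative usage sketch (not part of this file, compiled out):
 * cpu_up() and cpu_down() are the external entry points, used for example
 * by the sysfs 'online' attribute in drivers/base/cpu.c.  A minimal caller
 * would look like this; the function name is an assumption, and cpu_down()
 * is only available with CONFIG_HOTPLUG_CPU.
 */
#if 0
static int example_set_cpu_online_state(unsigned int cpu, bool online)
{
	/* error handling reduced to passing the return value through */
	return online ? cpu_up(cpu) : cpu_down(cpu);
}
#endif
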
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}


/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

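/*
 * Illustrative sketch (not part of this file, compiled out): the general
 * shape of an architecture's secondary-CPU start routine, showing where
 * notify_cpu_starting() is expected to be called, i.e. before the CPU
 * enables interrupts and marks itself online.  The function name and the
 * surrounding steps are placeholders, not any real architecture's code.
 */
#if 0
static void example_secondary_start_kernel(unsigned int cpu)
{
	/* ... low-level per-cpu setup done by the architecture ... */
	notify_cpu_starting(cpu);	/* CPU_STARTING notifiers run here */
	set_cpu_online(cpu, true);	/* now visible in cpu_online_mask */
	local_irq_enable();
	/* ... enter the idle loop ... */
}
#endif
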
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
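
/*
 * Illustrative sketch (not part of this file, compiled out): how
 * cpumask_of() uses the table above.  For cpu N it points at row
 * [1 + N % BITS_PER_LONG] and then steps back N / BITS_PER_LONG longs,
 * so the single set bit lands in the right word of the resulting mask.
 * The checks below only demonstrate the resulting property; the function
 * name is an assumption for illustration only.
 */
#if 0
static void example_check_cpumask_of(unsigned int cpu)
{
	const struct cpumask *m = cpumask_of(cpu);

	BUG_ON(cpumask_weight(m) != 1);		/* exactly one bit set */
	BUG_ON(!cpumask_test_cpu(cpu, m));	/* and it is bit 'cpu' */
}
#endif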

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}