/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        init_idle(tsk, cpu);
        return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
#endif

#endif /* #ifdef CONFIG_SMP */

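/*
 * Expected ordering of the idle-thread helpers above (a sketch derived
 * from the code in this file, not a normative call graph): the boot CPU
 * claims its current task as its idle thread via idle_thread_set_boot_cpu(),
 * idle_threads_init() then forks an idle task for every other possible
 * CPU, and idle_thread_get() hands the cached task back to the hotplug
 * core each time a CPU is (re)brought up, reinitializing it with
 * init_idle().
 */
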
static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int                    cpu;
        unsigned int                    status;
        struct smp_hotplug_thread       *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};

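/*
 * Lifecycle of a hotplug thread as driven by smpboot_thread_fn() below
 * (a summary of the code, kept here for reference):
 *
 *	HP_THREAD_NONE   --ht->setup()-->  HP_THREAD_ACTIVE
 *	HP_THREAD_ACTIVE --ht->park()--->  HP_THREAD_PARKED
 *	HP_THREAD_PARKED --ht->unpark()->  HP_THREAD_ACTIVE
 */
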
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; the loop does not
 * terminate otherwise.
 */
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->cleanup)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;

                case HP_THREAD_PARKED:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable_no_resched();
                        schedule();
                } else {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

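/*
 * Note on the loop above: set_current_state(TASK_INTERRUPTIBLE) is
 * issued *before* the stop/park/should_run checks. A wakeup that
 * arrives after a check but before schedule() moves the task back to
 * TASK_RUNNING, so schedule() returns promptly instead of sleeping
 * through the event; this is the standard kernel idiom for avoiding
 * missed wakeups.
 */
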
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                    ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
                /*
                 * Make sure that the task has actually scheduled out
                 * into park position, before calling the create
                 * callback. At least the migration thread callback
                 * requires that the task is off the runqueue.
                 */
                if (!wait_task_inactive(tsk, TASK_PARKED))
                        WARN_ON(1);
                else
                        ht->create(cpu);
        }
        return 0;
}

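/*
 * kthread_create_on_cpu() hands back a thread that is flagged as a
 * per-CPU kthread for @cpu and already parked, so a newly created
 * hotplug thread does not run until smpboot_unpark_thread() is called
 * for it; this is what makes the wait_task_inactive(tsk, TASK_PARKED)
 * check above meaningful.
 */
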
int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (ht->pre_unpark)
                ht->pre_unpark(cpu);
        kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                if (cpumask_test_cpu(cpu, cur->cpumask))
                        smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk && !ht->selfparking)
                kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

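/*
 * Parking walks hotplug_threads in reverse registration order while
 * unparking walks it in registration order; plausibly this follows the
 * usual teardown-in-reverse convention, so that later-registered
 * threads which may depend on earlier ones are parked first (an
 * inference, not stated in this file).
 */
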
static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* Unpark any threads that were voluntarily parked. */
        for_each_cpu_not(cpu, ht->cpumask) {
                if (cpu_online(cpu)) {
                        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
                        if (tsk)
                                kthread_unpark(tsk);
                }
        }

        /* We need to destroy also the parked threads of offline cpus */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(plug_thread->cpumask, cpu_possible_mask);

        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

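/*
 * Usage sketch for the registration API (illustrative only; the names
 * my_store, my_should_run and my_thread_fn are hypothetical, not part
 * of this file). A client supplies a per-cpu task_struct pointer slot
 * plus its callbacks and registers once, typically at init time:
 *
 *	static DEFINE_PER_CPU(struct task_struct *, my_store);
 *
 *	static int my_should_run(unsigned int cpu)
 *	{
 *		return 0;	// e.g. test per-cpu pending work here
 *	}
 *
 *	static void my_thread_fn(unsigned int cpu)
 *	{
 *		// process whatever made my_should_run() return true
 *	}
 *
 *	static struct smp_hotplug_thread my_threads = {
 *		.store			= &my_store,
 *		.thread_should_run	= my_should_run,
 *		.thread_fn		= my_thread_fn,
 *		.thread_comm		= "my_thread/%u",
 *	};
 *
 *	// from an init function:
 *	//	BUG_ON(smpboot_register_percpu_thread(&my_threads));
 */
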
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
        free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised mask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
                                         const struct cpumask *new)
{
        struct cpumask *old = plug_thread->cpumask;
        cpumask_var_t tmp;
        unsigned int cpu;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);

        /* Park threads that were exclusively enabled on the old mask. */
        cpumask_andnot(tmp, old, new);
        for_each_cpu_and(cpu, tmp, cpu_online_mask)
                smpboot_park_thread(plug_thread, cpu);

        /* Unpark threads that are exclusively enabled on the new mask. */
        cpumask_andnot(tmp, new, old);
        for_each_cpu_and(cpu, tmp, cpu_online_mask)
                smpboot_unpark_thread(plug_thread, cpu);

        cpumask_copy(old, new);

        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();

        free_cpumask_var(tmp);

        return 0;
}
EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);

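/*
 * Worked example for the mask update above (illustrative): with
 * old = {0,1,2,3} and new = {2,3,4,5}, the first cpumask_andnot()
 * yields {0,1}, whose online members get parked, and the second yields
 * {4,5}, whose online members get unparked; CPUs in both masks ({2,3})
 * are left untouched. Offline CPUs are skipped here and handled by
 * smpboot_unpark_threads(), which tests the cpumask when they come
 * online.
 */
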
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
        return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out. And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying. In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;
        }

        switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

        case CPU_POST_DEAD:

                /* The CPU died properly, so just start it up again. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;

        case CPU_DEAD_FROZEN:

                /*
                 * Timeout during CPU death, so let caller know.
                 * The outgoing CPU completed its processing, but after
                 * cpu_wait_death() timed out and reported the error. The
                 * caller is free to proceed, in which case the state
                 * will be reset properly by cpu_set_state_online().
                 * Proceeding despite this -EBUSY return makes sense
                 * for systems where the outgoing CPUs take themselves
                 * offline, with no post-death manipulation required from
                 * a surviving CPU.
                 */
                return -EBUSY;

        case CPU_BROKEN:

                /*
                 * The most likely reason we got here is that there was
                 * a timeout during CPU death, and the outgoing CPU never
                 * did complete its processing. This could happen on
                 * a virtualized system if the outgoing VCPU gets preempted
                 * for more than five seconds, and the user attempts to
                 * immediately online that same CPU. Trying again later
                 * might return -EBUSY above, hence -EAGAIN.
                 */
                return -EAGAIN;

        default:

                /* Should not happen.  Famous last words. */
                return -EIO;
        }
}

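/*
 * Sketch of how an architecture might consume the result (illustrative;
 * arch_boot_secondary() is a hypothetical name, not a real hook):
 *
 *	ret = cpu_check_up_prepare(cpu);
 *	if (ret && ret != -EBUSY)	// tolerate a late-but-completed death
 *		return ret;
 *	ret = arch_boot_secondary(cpu);	// actually fire up the CPU
 *
 * -EAGAIN is the retryable case: the previous incarnation of the CPU
 * may still be running, so onlining it immediately would be unsafe.
 */
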
/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
        (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

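/*
 * Note on cpu_set_state_online() above: atomic_xchg() is presumably
 * preferred over atomic_set() for its full-barrier semantics, ordering
 * the CPU_ONLINE store against the incoming CPU's earlier bringup
 * stores (an inference; this file does not spell out the reason).
 */
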
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
        int jf_left = seconds * HZ;
        int oldstate;
        bool ret = true;
        int sleep_jf = 1;

        might_sleep();

        /* The outgoing CPU will normally get done quite quickly. */
        if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
                goto update_state;
        udelay(5);

        /* But if the outgoing CPU dawdles, wait increasingly long times. */
        while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
                schedule_timeout_uninterruptible(sleep_jf);
                jf_left -= sleep_jf;
                if (jf_left <= 0)
                        break;
                sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
        }
update_state:
        oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
        if (oldstate == CPU_DEAD) {
                /* Outgoing CPU died normally, update state. */
                smp_mb(); /* atomic_read() before update. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
        } else {
                /* Outgoing CPU still hasn't died, set state accordingly. */
                if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                   oldstate, CPU_BROKEN) != oldstate)
                        goto update_state;
                ret = false;
        }
        return ret;
}

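/*
 * The backoff above grows sleep_jf by roughly 10% per iteration,
 * rounded up: 1, 2, 3, ..., 9, 10, 11, 13, 15, 17, 19, 21, ... jiffies.
 * Small values effectively step by one jiffy because of DIV_ROUND_UP;
 * the growth only turns geometric once sleep_jf exceeds 10.
 */
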
/*
 * Called by the outgoing CPU to report its successful death. Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" state is used when the surviving CPU
 * timed out. This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
        int oldstate;
        int newstate;
        int cpu = smp_processor_id();

        do {
                oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
                if (oldstate != CPU_BROKEN)
                        newstate = CPU_DEAD;
                else
                        newstate = CPU_DEAD_FROZEN;
        } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                oldstate, newstate) != oldstate);
        return newstate == CPU_DEAD;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
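
/*
 * Handshake sketch between the two sides above (illustrative; the
 * arch-side function names vary per architecture):
 *
 *	// Surviving CPU, e.g. from the arch's __cpu_die():
 *	if (!cpu_wait_death(cpu, 5))
 *		pr_err("CPU %u refused to die\n", cpu);
 *
 *	// Outgoing CPU, on its way into the arch's play-dead path:
 *	(void)cpu_report_death();
 */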