/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

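/*
 * Typical usage sketch: a thread function loops until whoever created the
 * thread calls kthread_stop().  struct my_dev and my_poll_hw() below are
 * illustrative placeholders, not part of this file.
 *
 *	static int my_poll_thread(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		while (!kthread_should_stop()) {
 *			my_poll_hw(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The value returned by my_poll_thread() is what kthread_stop() reports
 * back to its caller.
 */
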
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

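/*
 * Usage sketch for a parkable thread function (my_do_work() is an
 * illustrative placeholder): check for a park request before doing work
 * and call kthread_parkme(), so that the controlling task's kthread_park()
 * can complete.
 *
 *	static int my_parkable_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				my_do_work(arg);
 *			cond_resched();
 *		}
 *		return 0;
 *	}
 */
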
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

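/*
 * Usage sketch (my_flush_work() is an illustrative placeholder): a freezable
 * kthread marks itself freezable and then uses this helper as its loop
 * condition, so it both participates in system freeze and honours
 * kthread_stop().
 *
 *	static int my_flusher_thread(void *arg)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			my_flush_work(arg);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
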
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data as
 * its argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		int i = 0;

		/*
		 * I got SIGKILL, but wait for 10 more seconds for completion
		 * unless chosen by the OOM killer. This delay is there as a
		 * workaround for boot failure caused by SIGKILL upon device
		 * driver initialization timeout.
		 */
		while (i++ < 10 && !test_tsk_thread_flag(current, TIF_MEMDIE))
			if (wait_for_completion_timeout(&done, HZ))
				goto ready;
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
ready:
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

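/*
 * Usage sketch (my_thread_fn(), my_data and id are illustrative
 * placeholders): the new thread is created stopped, so the caller names it
 * via @namefmt, starts it explicitly, and later stops it.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				     "my_thread/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 *
 * The kthread_run() macro wraps the create-then-wake pair for callers that
 * do not need to bind or otherwise prepare the thread before it runs.
 */
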
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

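/*
 * Usage sketch (my_cpu_fn() is an illustrative placeholder): binding must
 * happen while the thread is still stopped, i.e. between kthread_create()
 * and the first wake_up_process().
 *
 *	struct task_struct *tsk = kthread_create(my_cpu_fn, NULL, "my/%u", cpu);
 *
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */
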
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

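/*
 * Usage sketch (my_percpu_fn() is an illustrative placeholder): the thread
 * comes back parked, so it is started with kthread_unpark() rather than
 * wake_up_process().  my_percpu_fn() must honour kthread_should_park() and
 * call kthread_parkme(), as in the parkable thread sketch above.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_cpu(my_percpu_fn, NULL, cpu, "my_worker/%u");
 *	if (!IS_ERR(tsk))
 *		kthread_unpark(tsk);
 */
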
static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);

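/*
 * Controller-side sketch: park a thread whose function honours
 * kthread_should_park() (see the parkable thread sketch above) around an
 * operation that must not race with it, then let it run again.
 * my_reconfigure_hw() is an illustrative placeholder.
 *
 *	if (!kthread_park(tsk)) {
 *		my_reconfigure_hw();
 *		kthread_unpark(tsk);
 *	}
 */
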
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

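/*
 * Sketch of the reference rule above (my_selfexiting_fn() and data are
 * illustrative placeholders): if the thread function may exit on its own,
 * the creator can hold an extra reference so the task_struct stays valid
 * across kthread_stop().
 *
 *	tsk = kthread_create(my_selfexiting_fn, data, "my_thread");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	get_task_struct(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 *	put_task_struct(tsk);
 */
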
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

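/*
 * Usage sketch: a dedicated worker thread can be set up by pointing
 * kthread_run() at this function.  my_worker and the thread name are
 * illustrative placeholders.
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *task;
 *
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *
 * Work items can then be queued on my_worker with queue_kthread_work().
 */
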
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed asynchronously by @worker.  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

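/*
 * Usage sketch (my_work_fn() and my_worker are illustrative placeholders,
 * my_worker being the worker from the kthread_worker_fn() sketch above):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("running on the worker thread\n");
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	queue_kthread_work(&my_worker, &my_work);
 *
 * A kthread_work may also be initialized at run time with
 * init_kthread_work().
 */
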
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
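
/*
 * Teardown sketch (my_worker is the illustrative worker from the sketches
 * above): flush outstanding work before stopping the worker thread.
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(task);
 *
 * Here "task" is the task_struct returned by kthread_run() in the
 * kthread_worker_fn() sketch above.
 */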