/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        struct work_struct *current_work;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

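/*
 * Note on the work->data encoding used by the two helpers above: the bits
 * covered by WORK_STRUCT_FLAG_MASK carry flag bits such as
 * WORK_STRUCT_PENDING, while the remaining bits hold the queue pointer
 * (normally a cpu_workqueue_struct, or transiently a workqueue_struct for
 * delayed work until its timer fires), which is why get_wq_data() masks
 * with WORK_STRUCT_WQ_DATA_MASK before the pointer is used.
 */
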
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
            && work_pending(work)
            && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                cwq->current_work = work;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);
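
/*
 * Minimal usage sketch for run_scheduled_work() (illustrative only; the
 * handler and work item below are hypothetical, not part of this file):
 *
 *        static void my_handler(struct work_struct *work)
 *        {
 *                do_something();
 *        }
 *        static DECLARE_WORK(my_work, my_handler);
 *
 *        schedule_work(&my_work);
 *        int ran = run_scheduled_work(&my_work);
 *
 * ran is non-zero if the pending callback was executed synchronously here,
 * and 0 if there was nothing left to run. As the comment above says, this
 * must not be used for delayed work.
 */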

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
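
/*
 * Illustrative queue_work() usage with a driver-private workqueue (the
 * names my_wq, my_work and my_work_func are hypothetical):
 *
 *        static struct workqueue_struct *my_wq;
 *        static void my_work_func(struct work_struct *work);
 *        static DECLARE_WORK(my_work, my_work_func);
 *
 *        my_wq = create_workqueue("my_wq");
 *        if (my_wq)
 *                queue_work(my_wq, &my_work);
 *
 * Queueing an item that is already pending returns 0 and leaves the item
 * queued exactly once, as guaranteed by the test_and_set_bit() above.
 */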

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
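
/*
 * Illustrative queue_delayed_work() usage (hypothetical names); the
 * handler receives &my_poll.work and can use container_of() to reach the
 * surrounding structure:
 *
 *        static void my_poll_func(struct work_struct *work);
 *        static DECLARE_DELAYED_WORK(my_poll, my_poll_func);
 *
 *        queue_delayed_work(my_wq, &my_poll, HZ);
 *
 * This queues my_poll_func to run on my_wq roughly one second from now.
 */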

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
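
/*
 * As above, but pinned to an explicit processor (hypothetical names);
 * unlike queue_work(), the target CPU is chosen by the caller:
 *
 *        queue_delayed_work_on(cpu, my_wq, &my_poll, msecs_to_jiffies(500));
 */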

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node-local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->wq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                        struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                preempt_enable();
                /*
                 * We can still touch *cwq here because we are keventd, and
                 * hot-unplug will be waiting for us to exit.
                 */
                run_workqueue(cwq);
                preempt_disable();
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active) {
                        preempt_enable();
                        wait_for_completion(&barr.done);
                        preempt_disable();
                }
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items that were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        preempt_disable();              /* CPU hotplug */
        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_workqueue);
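
/*
 * Typical teardown ordering for a driver-private queue (hypothetical
 * names): stop new submissions first, then flush, then destroy:
 *
 *        my_stop_queueing();
 *        flush_workqueue(my_wq);
 *        destroy_workqueue(my_wq);
 *
 * destroy_workqueue() flushes on its own, so the explicit flush is only
 * needed when the queue must be drained before other teardown steps.
 */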

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running)) {
                mutex_unlock(&workqueue_mutex);
                wait_for_completion(&barr.done);
                mutex_lock(&workqueue_mutex);
        }
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;

        mutex_lock(&workqueue_mutex);
        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                goto out;

        /*
         * This work can't be re-queued, and the lock above protects us
         * from take_over_work(), no need to re-check that get_wq_data()
         * is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_release(work);
        spin_unlock_irq(&cwq->lock);

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
        }
out:
        mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);
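
/*
 * Illustrative flush_work() usage when tearing down an object whose work
 * item may still be queued or running (hypothetical names); the caller
 * first makes sure the callback cannot requeue itself:
 *
 *        my_dev->shutting_down = 1;
 *        flush_work(my_wq, &my_dev->work);
 *        kfree(my_dev);
 */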

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->freezeable = freezeable;

        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
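
/*
 * __create_workqueue() is normally reached through wrapper macros in
 * <linux/workqueue.h> rather than called directly (illustrative; see that
 * header for the exact set and spelling in this tree):
 *
 *        wq = create_workqueue("my_wq");
 *        wq = create_singlethread_workqueue("my_wq");
 *
 * The multithreaded variant creates one worker per online CPU; the
 * singlethread variant creates a single worker that uses the first
 * possible CPU's queue area.  The freezeable argument controls whether
 * the workers take part in the suspend freezer (see worker_thread()
 * above) instead of being marked PF_NOFREEZE.
 */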

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
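
/*
 * Minimal schedule_work() usage against the shared keventd queue
 * (hypothetical names), given a struct my_ctx that embeds a work_struct
 * named work:
 *
 *        static void my_func(struct work_struct *work)
 *        {
 *                struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *                ...
 *        }
 *
 *        INIT_WORK(&ctx->work, my_func);
 *        schedule_work(&ctx->work);
 */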

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
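
/*
 * Illustrative call (hypothetical function name); the callback runs once
 * on every online CPU from that CPU's keventd thread, and this helper does
 * not return until all of them have completed:
 *
 *        err = schedule_on_each_cpu(my_percpu_sync);
 */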

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
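
/*
 * Illustrative call for a delayed work whose handler requeues itself
 * (hypothetical names); the loop above alternates cancel_delayed_work()
 * and flush_workqueue() until the rearmed timer is caught pending and
 * deleted:
 *
 *        cancel_rearming_delayed_workqueue(my_wq, &my_dev->poll_work);
 */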

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
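
/*
 * Illustrative use (hypothetical names); &my_dev->ew must stay valid until
 * the callback has run, since it may be executed later from keventd:
 *
 *        execute_in_process_context(my_release_fn, &my_dev->ew);
 */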

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}