/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}
static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
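
/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might define a work item and submit it to its own workqueue with
 * queue_work().  The names my_wq, my_work and my_work_fn are hypothetical
 * and exist only for this example; the block is kept out of the build
 * with #if 0.
 */
#if 0
static struct workqueue_struct *my_wq;	/* created with create_workqueue() */

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context in one of my_wq's worker threads */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_submit(void)
{
	/*
	 * Returns non-zero if the work was actually queued; 0 means it was
	 * already pending, and the handler will run once for both requests.
	 */
	queue_work(my_wq, &my_work);
}
#endif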
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
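
/*
 * Illustrative usage sketch (not part of the original file): submitting a
 * delayed work item, optionally pinned to one CPU.  my_dwork, my_dwork_fn
 * and the my_submit_* helpers are hypothetical example names; the block is
 * excluded from the build with #if 0.
 */
#if 0
static void my_dwork_fn(struct work_struct *work)
{
	/* runs in process context roughly one second after submission */
}

static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);

static void my_submit_delayed(struct workqueue_struct *my_wq)
{
	/* run my_dwork_fn() about one second from now, on whatever CPU */
	queue_delayed_work(my_wq, &my_dwork, HZ);
}

static void my_submit_delayed_on_cpu0(struct workqueue_struct *my_wq)
{
	/* same, but arm the timer on CPU 0 so the work is queued there */
	queue_delayed_work_on(0, my_wq, &my_dwork, HZ);
}
#endif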
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) && !cwq->should_stop
		    && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
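
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * shutdown path typically flushes its private workqueue before freeing the
 * data its work functions touch.  my_dev_stop() is a hypothetical name;
 * the block is excluded from the build with #if 0.
 */
#if 0
static void my_dev_stop(struct workqueue_struct *my_wq)
{
	/* blocks until every work item queued so far has finished */
	flush_workqueue(my_wq);

	/* now it is safe to tear down the state those work items used */
}
#endif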
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will attempt to cancel the work if it is queued. If the
 * work's callback appears to be running, cancel_work_sync() will block until
 * it has completed.
 *
 * cancel_work_sync() is designed to be used when the caller is tearing down
 * data structures which the callback function operates upon. It is expected
 * that, prior to calling cancel_work_sync(), the caller has arranged for the
 * work to not be requeued.
 */
void cancel_work_sync(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();
	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_clear_pending(work);
	spin_unlock_irq(&cwq->lock);

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
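
/*
 * Illustrative usage sketch (not part of the original file): typical
 * teardown order when the work item is embedded in a dynamically allocated
 * object.  struct my_obj and my_obj_release() are hypothetical names; the
 * block is excluded from the build with #if 0.
 */
#if 0
struct my_obj {
	struct work_struct work;
	/* ... other state used by the work function ... */
};

static void my_obj_release(struct my_obj *obj)
{
	/* caller has already made sure nothing re-queues obj->work */
	cancel_work_sync(&obj->work);
	kfree(obj);		/* safe: the callback can no longer be running */
}
#endif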
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
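
/*
 * Illustrative usage sketch (not part of the original file): deferring work
 * to the kernel-global keventd queue, immediately or after a delay.
 * my_event, my_devent and my_event_fn are hypothetical names; the block is
 * excluded from the build with #if 0.
 */
#if 0
static void my_event_fn(struct work_struct *work)
{
	/* runs in keventd process context */
}

static DECLARE_WORK(my_event, my_event_fn);
static DECLARE_DELAYED_WORK(my_devent, my_event_fn);

static void my_notify(void)
{
	schedule_work(&my_event);			/* as soon as keventd gets to it */
	schedule_delayed_work(&my_devent, 2 * HZ);	/* about two seconds from now */
}
#endif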
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
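
/*
 * Illustrative usage sketch (not part of the original file): running a
 * function once on every online CPU and waiting for all of them.
 * my_percpu_sync() and my_trigger_sync() are hypothetical names; the block
 * is excluded from the build with #if 0.
 */
#if 0
static void my_percpu_sync(struct work_struct *unused)
{
	/* executes in keventd context on each online CPU */
}

static int my_trigger_sync(void)
{
	/* blocks until my_percpu_sync() has run everywhere; may return -ENOMEM */
	return schedule_on_each_cpu(my_percpu_sync);
}
#endif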
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
 * on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	/* Was it ever queued ? */
	if (cwq) {
		struct workqueue_struct *wq = cwq->wq;

		while (!cancel_delayed_work(dwork))
			flush_workqueue(wq);
	}
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
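
/*
 * Illustrative usage sketch (not part of the original file): a self-rearming
 * poll handler and its teardown.  my_poll, my_poll_fn and my_poll_stop are
 * hypothetical names; the block is excluded from the build with #if 0.
 */
#if 0
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* ... do the periodic work ... */
	schedule_delayed_work(&my_poll, HZ);	/* rearm for one second later */
}

static void my_poll_stop(void)
{
	/* copes with the handler racing to rearm itself */
	cancel_rearming_delayed_work(&my_poll);
}
#endif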
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
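
/*
 * Illustrative usage sketch (not part of the original file): running a
 * release routine that needs process context, from a caller that may be in
 * interrupt context.  struct my_obj, my_release() and my_put() are
 * hypothetical names; the block is excluded from the build with #if 0.
 */
#if 0
struct my_obj {
	struct execute_work ew;
	/* ... */
};

static void my_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);
}

static void my_put(struct my_obj *obj)
{
	/* runs my_release() immediately, or via keventd if in interrupt context */
	execute_in_process_context(my_release, &obj->ew);
}
#endif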
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
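
/*
 * Illustrative usage sketch (not part of the original file): creating a
 * private workqueue through the create_workqueue() wrapper and tearing it
 * down again.  my_wq, my_init() and my_exit() are hypothetical names; the
 * block is excluded from the build with #if 0.
 */
#if 0
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = create_workqueue("mydrv");	/* one worker thread per CPU */
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* pending work is run before the threads exit */
}
#endif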
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}
void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}