/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	struct work_struct *current_work;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
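
/*
 * Note (added commentary): set_wq_data()/get_wq_data() pack a
 * cpu_workqueue_struct pointer and the WORK_STRUCT_* flag bits into
 * the single atomic_long_t work->data. This works because
 * cpu_workqueue_struct is ____cacheline_aligned, so the low bits of
 * its address are always zero and are free to carry the flags;
 * WORK_STRUCT_WQ_DATA_MASK strips the flag bits off again on read.
 */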
static int __run_work(struct cpu_workqueue_struct *cwq,
		      struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}
/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);
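
/*
 * Example (illustrative sketch, not part of the original file): forcing
 * a previously scheduled, non-delayed work item to run synchronously in
 * the caller's context. "my_work" is a hypothetical DECLARE_WORK item:
 *
 *	schedule_work(&my_work);
 *	...
 *	if (run_scheduled_work(&my_work))
 *		pr_debug("my_work ran synchronously\n");
 */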
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
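
/*
 * Example (illustrative sketch, not part of the original file): a
 * callback declared with DECLARE_WORK and queued on a private
 * workqueue. "my_wq" and "my_work_fn" are hypothetical names:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_debug("running in process context, may sleep\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 */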
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
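
/*
 * Example (illustrative sketch): a delayed item declared with
 * DECLARE_DELAYED_WORK and queued to run one second from now; the
 * callback recovers its delayed_work with container_of(). Names are
 * hypothetical:
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 */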
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
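
/*
 * Example (illustrative sketch): pinning a hypothetical delayed item
 * to CPU 0, half a second from now:
 *
 *	queue_delayed_work_on(0, my_wq, &stats_dwork, HZ / 2);
 */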
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
static inline void init_wq_barrier(struct wq_barrier *barr)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		preempt_enable();
		/*
		 * We can still touch *cwq here because we are keventd, and
		 * hot-unplug will be waiting us to exit.
		 */
		run_workqueue(cwq);
		preempt_disable();
	} else {
		struct wq_barrier barr;

		init_wq_barrier(&barr);
		__queue_work(cwq, &barr.work);

		preempt_enable();	/* Can no longer touch *cwq */
		wait_for_completion(&barr.done);
		preempt_disable();
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	preempt_disable();		/* CPU hotplug */
	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_workqueue);
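
/*
 * Example (illustrative sketch): a typical driver shutdown sequence.
 * The caller first prevents new work from being queued, then flushes
 * everything already queued. "my_dev" and its flag are hypothetical:
 *
 *	my_dev->shutting_down = 1;
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */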
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		init_wq_barrier(&barr);
		insert_work(cwq, &barr.work, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		mutex_unlock(&workqueue_mutex);
		wait_for_completion(&barr.done);
		mutex_lock(&workqueue_mutex);
	}
}
/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be re-queued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	mutex_lock(&workqueue_mutex);
	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		goto out;

	/*
	 * This work can't be re-queued, and the lock above protects us
	 * from take_over_work(), no need to re-check that get_wq_data()
	 * is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
	} else {
		int cpu;

		for_each_online_cpu(cpu)
			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
	}
out:
	mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);
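
/*
 * Example (illustrative sketch): tearing down an object whose work
 * callback touches it. Per the rules above, the caller has already
 * arranged that the work cannot be re-queued. "obj" is hypothetical:
 *
 *	obj->going_away = 1;
 *	flush_work(my_wq, &obj->work);
 *	kfree(obj);
 */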
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
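
/*
 * Example (illustrative sketch): drivers normally go through the
 * wrapper macros in <linux/workqueue.h> rather than calling
 * __create_workqueue() directly. "mydrv" is a hypothetical name:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */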
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static struct workqueue_struct *keventd_wq;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
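
/*
 * Example (illustrative sketch): deferring work from an interrupt
 * handler to keventd, since the handler itself must not sleep.
 * "my_irq_work_fn" is hypothetical:
 *
 *	static DECLARE_WORK(my_irq_work, my_irq_work_fn);
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_irq_work);
 *		return IRQ_HANDLED;
 *	}
 */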
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
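
/*
 * Example (illustrative sketch): running a hypothetical per-cpu drain
 * on every online CPU and waiting until all of them have finished:
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_caches);
 */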
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
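
/*
 * Example (illustrative sketch): the self-rearming pattern these two
 * helpers exist for, and its teardown. Names and the interval are
 * hypothetical:
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&poll_dwork, HZ / 10);
 *	}
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	cancel_rearming_delayed_work(&poll_dwork);
 */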
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
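
/*
 * Example (illustrative sketch): releasing a resource from a context
 * that may or may not be atomic. The execute_work storage must stay
 * valid until the callback runs, so it lives in the (hypothetical)
 * object itself:
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */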
int keventd_up(void)
{
	return keventd_wq != NULL;
}
int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}
/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}