/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
enum {
	/* global_cwq flags */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */

	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct cpu_workqueue_struct;

struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	unsigned long		single_cpu;	/* cpu for single cpu wq */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_process(cwq->worker->task);
}

/**
 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
 * @cwq: cwq to unbind
 *
 * Try to unbind @cwq from single cpu workqueue processing.  If
 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
{
	struct workqueue_struct *wq = cwq->wq;
	struct global_cwq *gcwq = cwq->gcwq;

	BUG_ON(wq->single_cpu != gcwq->cpu);
	/*
	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
	 * thaw_workqueues() will either restart processing on this
	 * cpu or unbind if empty.  This keeps works queued while
	 * frozen fully ordered and flushable.
	 */
	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
		wq->single_cpu = NR_CPUS;
	}
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;
	bool arbitrate;

	debug_work_activate(work);

	/* determine gcwq to use */
	if (!(wq->flags & WQ_SINGLE_CPU)) {
		/* just use the requested cpu for multicpu workqueues */
		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		unsigned int req_cpu = cpu;

		/*
		 * It's a bit more complex for single cpu workqueues.
		 * We first need to determine which cpu is going to be
		 * used.  If no cpu is currently serving this
		 * workqueue, arbitrate using atomic accesses to
		 * wq->single_cpu; otherwise, use the current one.
		 */
	retry:
		cpu = wq->single_cpu;
		arbitrate = cpu == NR_CPUS;
		if (arbitrate)
			cpu = req_cpu;

		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);

		/*
		 * The following cmpxchg() is a full barrier paired
		 * with smp_wmb() in cwq_unbind_single_cpu() and
		 * guarantees that all changes to wq->st_* fields are
		 * visible on the new cpu after this point.
		 */
		if (arbitrate)
			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);

		if (unlikely(wq->single_cpu != cpu)) {
			spin_unlock_irqrestore(&gcwq->lock, flags);
			goto retry;
		}
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
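
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a caller typically embeds a work_struct in its own context
 * structure and queues it on a workqueue created earlier.  The names
 * my_dev, my_wq and my_work_fn below are hypothetical.
 *
 *	struct my_dev {
 *		struct workqueue_struct *my_wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		// runs in process context and may sleep
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(dev->my_wq, &dev->work);	// returns 0 if already pending
 */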
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
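
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): delayed works bundle a timer with the work item; the timer fires
 * delayed_work_timer_fn() which then queues the work proper.  The names
 * my_wq and my_dwork_fn are hypothetical.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	// run in ~1 second
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ);	// pin to CPU 1
 */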
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (unlikely(worker->flags & WORKER_ROGUE))
		wake_up_all(&gcwq->trustee_wait);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	worker->task->flags |= PF_THREAD_BOUND;
	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
		/* this was the last work, unbind from single cpu */
		cwq_unbind_single_cpu(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;

woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&cwq->worklist,
					 struct work_struct, entry);

		/*
		 * The following is a rather inefficient way to close
		 * race window against cpu hotplug operations.  Will
		 * be replaced with a better implementation.
		 */
		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
			     !cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu)))) {
			spin_unlock_irq(&gcwq->lock);
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));
			cpu_relax();
			spin_lock_irq(&gcwq->lock);
			continue;
		}

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
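
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver usually flushes its workqueue before tearing down state
 * that queued works may still reference.  my_dev and my_wq are
 * hypothetical.
 *
 *	flush_workqueue(my_dev->my_wq);		// wait for queued works
 *	destroy_workqueue(my_dev->my_wq);	// also flushes before freeing
 */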
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct cpu_workqueue_struct *cwq;
	struct global_cwq *gcwq;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;
	gcwq = cwq->gcwq;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
	} else {
		if (cwq->worker && cwq->worker->current_work == work)
			worker = cwq->worker;
		if (!worker)
			goto already_gone;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;
	gcwq = cwq->gcwq;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(cwq, get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	struct global_cwq *gcwq = cwq->gcwq;
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = NULL;
	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
		worker = cwq->worker;
		insert_wq_barrier(cwq, &barr, work, worker);
	}

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;
	wq = cwq->wq;

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
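
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): in a device-removal path the work is cancelled synchronously so
 * that work->func() is guaranteed not to be running afterwards.  my_dev is
 * hypothetical.
 *
 *	cancel_work_sync(&my_dev->work);	// true if it was pending
 *	kfree(my_dev);				// now safe to free
 */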
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
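
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the caller provides storage for the execute_work so the function
 * can be deferred when called from interrupt context.  my_release_fn and
 * my_dev are hypothetical.
 *
 *	if (execute_in_process_context(my_release_fn, &my_dev->ew))
 *		pr_debug("my_release_fn deferred to keventd\n");
 */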
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->worker->task)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	if (cwqs)
		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->single_cpu = NR_CPUS;

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally. Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->worklist);
		INIT_LIST_HEAD(&cwq->delayed_works);

		if (failed)
			continue;
		cwq->worker = create_worker(cwq, cpu_online(cpu));
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_possible_cpu(cpu)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
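
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): drivers normally reach this function through the
 * create_workqueue() family of wrappers declared in <linux/workqueue.h>.
 * "my_wq" is a hypothetical name.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("my_wq");		// one worker per CPU
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */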
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->worker) {
			spin_lock_irq(&cwq->gcwq->lock);
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
			spin_unlock_irq(&cwq->gcwq->lock);
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/*
 * CPU hotplug is implemented by allowing cwqs to be detached from
 * CPU, running with unbound workers and allowing them to be
 * reattached later if the cpu comes back online.  A separate thread
 * is created to govern cwqs in such state and is called the trustee.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		making all existing workers rogue.  DOWN_PREPARE waits
 *		for trustee to enter this state.  After reaching
 *		IN_CHARGE, trustee tries to execute the pending
 *		worklist until it's empty and the state is set to
 *		BUTCHER, or the state is set to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has gone down.  Once this state is set trustee
 *		knows that there will be no new works on the worklist
 *		and once the worklist is empty it can proceed to
 *		killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and transits to DONE.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is complete.
 *
 *          trustee                 CPU                draining
 *         took over                down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                  ^
 *                        | CPU is back online  v   return workers |
 *                         ----------------> RELEASE --------------
 */

/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating remaining time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})

/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for CANCEL request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct hlist_node *pos;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Make all workers rogue.  Trustee must be bound to the
	 * target cpu and can't be cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.
	 * Note that if the gcwq is frozen, there may be frozen works
	 * in freezeable cwqs.  Don't declare completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *worker;
	struct hlist_node *pos;
	unsigned long flags;
	int i;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/* clear ROGUE from all workers */
		list_for_each_entry(worker, &gcwq->idle_list, entry)
			worker->flags &= ~WORKER_ROGUE;

		for_each_busy_worker(worker, i, pos, gcwq)
			worker->flags &= ~WORKER_ROGUE;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
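
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): work_on_cpu() runs a function synchronously on a chosen CPU via a
 * temporary kthread.  my_read_state is a hypothetical callback.
 *
 *	static long my_read_state(void *arg)
 *	{
 *		// executes on the target cpu while the caller sleeps
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, my_read_state, NULL);
 */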
#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their frozen_works
 * list instead of the cwq ones.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective cwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			/* perform delayed unbind from single cpu if empty */
			if (wq->single_cpu == gcwq->cpu &&
			    !cwq->nr_active && list_empty(&cwq->delayed_works))
				cwq_unbind_single_cpu(cwq);

			wake_up_process(cwq->worker->task);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
void __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}