/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

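/*
 * Illustrative sketch (not part of the original file): how the on-stack
 * helpers above are meant to be paired by a caller.  The pattern mirrors
 * the wq_barrier usage later in this file; my_stack_fn is a hypothetical
 * callback.
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_stack_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */
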
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue()/wait_on_work()
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than an optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	unsigned long flags = *work_data_bits(work) &
				(1UL << WORK_STRUCT_STATIC);
	atomic_long_set(&work->data, flags);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

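/*
 * Illustrative sketch (not part of the original file): work->data packs the
 * owning cwq pointer and the flag bits into a single word, roughly:
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	struct cpu_workqueue_struct *cwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);   // aligned pointer
 *	int pending = data & (1UL << WORK_STRUCT_PENDING);   // low flag bit
 *
 * This works because cpu_workqueue_struct is cacheline aligned, so the low
 * bits of its address are always zero and can carry the flags.
 */
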
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

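/*
 * Illustrative usage sketch (not part of the original file); my_wq and
 * my_handler are hypothetical:
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		// runs in process context in one of the workqueue's threads
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	queue_work(my_wq, &my_work);	// no-op if my_work is already queued
 */
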
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

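/*
 * Illustrative usage sketch (not part of the original file); my_wq and
 * my_poll_fn are hypothetical:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	queue_delayed_work(my_wq, &my_poll, HZ);	// run in about 1 second
 */
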
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it, and we need
		 * to take that into account for lockdep too.  To avoid
		 * bogus "held lock freed" warnings as well as problems
		 * when looking into work->lockdep_map, make a copy and
		 * use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

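/*
 * Illustrative teardown sketch (not part of the original file): a typical
 * driver remove path; my_dev and its members are hypothetical.
 *
 *	cancel_delayed_work_sync(&my_dev->poll_work);	// timer and callback gone
 *	cancel_work_sync(&my_dev->irq_work);		// callback gone
 *	destroy_workqueue(my_dev->wq);			// now safe to tear down
 */
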
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

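/*
 * Illustrative usage sketch (not part of the original file): deferring work
 * from an interrupt handler to process context via the global keventd
 * workqueue; my_irq_handler and my_bh are hypothetical.
 *
 *	static void my_bh(struct work_struct *work)
 *	{
 *		// process context: may sleep, take mutexes, etc.
 *	}
 *	static DECLARE_WORK(my_deferred, my_bh);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_deferred);
 *		return IRQ_HANDLED;
 *	}
 */
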
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

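/*
 * Illustrative usage sketch (not part of the original file);
 * my_flush_local_caches is a hypothetical callback that operates on
 * per-CPU state:
 *
 *	static void my_flush_local_caches(struct work_struct *unused)
 *	{
 *		// runs once on every online CPU, in process context
 *	}
 *
 *	int err = schedule_on_each_cpu(my_flush_local_caches);
 */
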
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

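/*
 * Illustrative usage sketch (not part of the original file); my_release and
 * struct my_obj are hypothetical, and the execute_work storage lives in the
 * object being released:
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	// Runs my_release() immediately when called from process context,
 *	// otherwise defers it to keventd:
 *	execute_in_process_context(my_release, &obj->ew);
 */
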
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

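/*
 * Illustrative usage sketch (not part of the original file): drivers normally
 * reach __create_workqueue_key() through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in linux/workqueue.h; my_wq is
 * hypothetical.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */
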
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

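/*
 * Illustrative usage sketch (not part of the original file); my_read_state
 * is a hypothetical function that must run on a specific CPU:
 *
 *	static long my_read_state(void *arg)
 *	{
 *		// runs in a kernel thread bound to the requested CPU
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, my_read_state, NULL);
 */
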
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}