/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

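/*
 * Usage sketch for the queueing API above -- illustrative only and kept out
 * of the build.  The names example_wq, example_work, example_work_func and
 * example_init are hypothetical, not part of this file.
 */
#if 0
static struct workqueue_struct *example_wq;

static void example_work_func(struct work_struct *work)
{
	/* Runs in process context on one of example_wq's worker threads. */
}

static DECLARE_WORK(example_work, example_work_func);

static int example_init(void)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	/* queue_work() returns non-zero only if the work was not already pending. */
	queue_work(example_wq, &example_work);
	return 0;
}
#endif
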
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

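/*
 * Delayed-work usage sketch -- illustrative only and kept out of the build.
 * example_wq, example_dwork, example_poll and example_start are hypothetical
 * names.
 */
#if 0
static void example_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_dwork, example_poll);
static struct workqueue_struct *example_wq;

static void example_poll(struct work_struct *work)
{
	/* Re-arm ourselves: run again roughly once per second. */
	queue_delayed_work(example_wq, &example_dwork, HZ);
}

static void example_start(void)
{
	/* First run after ~100ms; returns 0 if already pending. */
	queue_delayed_work(example_wq, &example_dwork, msecs_to_jiffies(100));
}
#endif
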
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		map_acquire(&cwq->wq->lockdep_map);
		map_acquire(&lockdep_map);
		f(work);
		map_release(&lockdep_map);
		map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	map_acquire(&wq->lockdep_map);
	map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

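/*
 * Shutdown sketch for flush_workqueue() -- illustrative only and kept out of
 * the build; example_wq and example_shutdown are hypothetical names.
 */
#if 0
static struct workqueue_struct *example_wq;

static void example_shutdown(void)
{
	/*
	 * Block until everything queued on example_wq so far has run.  The
	 * caller must make sure nothing keeps requeueing work afterwards.
	 */
	flush_workqueue(example_wq);
}
#endif
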
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	map_acquire(&cwq->wq->lockdep_map);
	map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	map_acquire(&work->lockdep_map);
	map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

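/*
 * cancel_work_sync() usage sketch -- illustrative only and kept out of the
 * build; struct example_dev and example_remove are hypothetical names.
 */
#if 0
struct example_dev {
	struct work_struct irq_work;
};

static void example_remove(struct example_dev *dev)
{
	/*
	 * After this returns, irq_work is neither queued nor running, so it
	 * is safe to free the structure that embeds it.
	 */
	cancel_work_sync(&dev->irq_work);
	kfree(dev);
}
#endif
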
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

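/*
 * cancel_delayed_work_sync() usage sketch with a self-rearming delayed work
 * -- illustrative only and kept out of the build; example_tick,
 * example_tick_work and example_stop are hypothetical names.
 */
#if 0
static void example_tick(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_tick_work, example_tick);

static void example_tick(struct work_struct *work)
{
	/* Periodic work that rearms itself once per second. */
	schedule_delayed_work(&example_tick_work, HZ);
}

static void example_stop(void)
{
	/* Kills both the pending timer and any rearmed or running instance. */
	cancel_delayed_work_sync(&example_tick_work);
}
#endif
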
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

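/*
 * schedule_work() usage sketch: deferring from interrupt context to the
 * kernel-global queue -- illustrative only and kept out of the build.  It
 * assumes <linux/interrupt.h>; struct example_dev, example_bh, example_irq
 * and example_setup are hypothetical names.
 */
#if 0
struct example_dev {
	struct work_struct bh_work;
};

static void example_bh(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, bh_work);

	/* Process context: sleeping and mutexes are allowed here. */
	(void)dev;
}

static void example_setup(struct example_dev *dev)
{
	INIT_WORK(&dev->bh_work, example_bh);
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *dev = data;

	/* Hand the slow part off to keventd; safe to call from hard irq. */
	schedule_work(&dev->bh_work);
	return IRQ_HANDLED;
}
#endif
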
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}

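/*
 * schedule_on_each_cpu() usage sketch -- illustrative only and kept out of
 * the build; example_counter, example_drain_cpu and example_drain_all are
 * hypothetical names.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_drain_cpu(struct work_struct *unused)
{
	/* Runs on every online CPU, in that CPU's keventd thread. */
	__get_cpu_var(example_counter) = 0;
}

static int example_drain_all(void)
{
	/* Blocks until the callback has finished on every online CPU. */
	return schedule_on_each_cpu(example_drain_cpu);
}
#endif
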
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

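/*
 * execute_in_process_context() usage sketch -- illustrative only and kept
 * out of the build; struct example_dev, example_release and example_put are
 * hypothetical names.
 */
#if 0
struct example_dev {
	struct execute_work release_ew;
};

static void example_release(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, release_ew.work);

	kfree(dev);
}

static void example_put(struct example_dev *dev)
{
	/* Runs example_release() now, or via keventd if we are in interrupt. */
	execute_in_process_context(example_release, &dev->release_ew);
}
#endif
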
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

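/*
 * Creation sketch: drivers normally reach __create_workqueue_key() through
 * the create_workqueue()/create_singlethread_workqueue() wrappers declared
 * in <linux/workqueue.h>.  Illustrative only and kept out of the build;
 * example_wq, example_probe and example_teardown are hypothetical names.
 */
#if 0
static struct workqueue_struct *example_wq;

static int example_probe(void)
{
	/* One worker thread total, instead of one per CPU. */
	example_wq = create_singlethread_workqueue("example");
	return example_wq ? 0 : -ENOMEM;
}

static void example_teardown(void)
{
	/* Flushes remaining work, stops the thread(s) and frees the queue. */
	destroy_workqueue(example_wq);
}
#endif
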
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	map_acquire(&cwq->wq->lockdep_map);
	map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return ret;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}