workqueue: fix freezeable workqueues implementation
[mirror_ubuntu-zesty-kernel.git] / kernel / workqueue.c
1/*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton <andrewm@uow.edu.au>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/signal.h>
24#include <linux/completion.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h>
27#include <linux/cpu.h>
28#include <linux/notifier.h>
29#include <linux/kthread.h>
30#include <linux/hardirq.h>
31#include <linux/mempolicy.h>
32#include <linux/freezer.h>
33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h>
35
36/*
37 * The per-CPU workqueue (if single thread, we always use the first
38 * possible cpu).
39 */
40struct cpu_workqueue_struct {
41
42 spinlock_t lock;
43
44 struct list_head worklist;
45 wait_queue_head_t more_work;
46
47 struct workqueue_struct *wq;
48 struct task_struct *thread;
49 struct work_struct *current_work;
50
51 int run_depth; /* Detect run_workqueue() recursion depth */
52} ____cacheline_aligned;
53
54/*
55 * The externally visible workqueue abstraction is an array of
56 * per-CPU workqueues:
57 */
58struct workqueue_struct {
59 struct cpu_workqueue_struct *cpu_wq;
60 const char *name;
61 struct list_head list; /* Empty if single thread */
62 int freezeable; /* Freeze threads during suspend */
63};
64
65/* All the per-cpu workqueues on the system, so that CPU hotplug can
 66 add/remove threads on each one as CPUs come and go. */
67static DEFINE_MUTEX(workqueue_mutex);
68static LIST_HEAD(workqueues);
69
70static int singlethread_cpu;
71
72/* If it's single threaded, it isn't in the list of workqueues. */
73static inline int is_single_threaded(struct workqueue_struct *wq)
74{
75 return list_empty(&wq->list);
76}
77
78/*
79 * Set the workqueue on which a work item is to be run
80 * - Must *only* be called if the pending flag is set
81 */
82static inline void set_wq_data(struct work_struct *work, void *wq)
83{
84 unsigned long new;
85
86 BUG_ON(!work_pending(work));
87
88 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
89 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
90 atomic_long_set(&work->data, new);
91}
92
93static inline void *get_wq_data(struct work_struct *work)
94{
95 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
96}
97
98static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
99{
100 int ret = 0;
101 unsigned long flags;
102
103 spin_lock_irqsave(&cwq->lock, flags);
104 /*
105 * We need to re-validate the work info after we've gotten
106 * the cpu_workqueue lock. We can run the work now iff:
107 *
108 * - the wq_data still matches the cpu_workqueue_struct
109 * - AND the work is still marked pending
110 * - AND the work is still on a list (which will be this
111 * workqueue_struct list)
112 *
113 * All these conditions are important, because we
114 * need to protect against the work being run right
115 * now on another CPU (all but the last one might be
116 * true if it's currently running and has not been
117 * released yet, for example).
118 */
119 if (get_wq_data(work) == cwq
120 && work_pending(work)
121 && !list_empty(&work->entry)) {
122 work_func_t f = work->func;
123 cwq->current_work = work;
124 list_del_init(&work->entry);
125 spin_unlock_irqrestore(&cwq->lock, flags);
126
127 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
128 work_release(work);
129 f(work);
130
131 spin_lock_irqsave(&cwq->lock, flags);
132 cwq->current_work = NULL;
133 ret = 1;
134 }
135 spin_unlock_irqrestore(&cwq->lock, flags);
136 return ret;
137}
138
139/**
140 * run_scheduled_work - run scheduled work synchronously
141 * @work: work to run
142 *
143 * This checks if the work was pending, and runs it
144 * synchronously if so. It returns a boolean to indicate
145 * whether it had any scheduled work to run or not.
146 *
147 * NOTE! This _only_ works for normal work_structs. You
148 * CANNOT use this for delayed work, because the wq data
149 * for delayed work will not point properly to the per-
150 * CPU workqueue struct, but will change!
151 */
152int fastcall run_scheduled_work(struct work_struct *work)
153{
154 for (;;) {
155 struct cpu_workqueue_struct *cwq;
156
157 if (!work_pending(work))
158 return 0;
159 if (list_empty(&work->entry))
160 return 0;
161 /* NOTE! This depends intimately on __queue_work! */
162 cwq = get_wq_data(work);
163 if (!cwq)
164 return 0;
165 if (__run_work(cwq, work))
166 return 1;
167 }
168}
169EXPORT_SYMBOL(run_scheduled_work);
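/*
 * A short usage sketch (hypothetical caller and work item): a normal,
 * non-delayed work_struct that was queued earlier can be pushed to run
 * synchronously with run_scheduled_work(); the return value says whether
 * there was anything pending to run.
 *
 *	schedule_work(&my_work);
 *	...
 *	if (!run_scheduled_work(&my_work))
 *		printk(KERN_DEBUG "my_work was no longer pending\n");
 */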
170
171static void insert_work(struct cpu_workqueue_struct *cwq,
172 struct work_struct *work, int tail)
173{
174 set_wq_data(work, cwq);
175 if (tail)
176 list_add_tail(&work->entry, &cwq->worklist);
177 else
178 list_add(&work->entry, &cwq->worklist);
179 wake_up(&cwq->more_work);
180}
181
182/* Preempt must be disabled. */
183static void __queue_work(struct cpu_workqueue_struct *cwq,
184 struct work_struct *work)
185{
186 unsigned long flags;
187
188 spin_lock_irqsave(&cwq->lock, flags);
189 insert_work(cwq, work, 1);
190 spin_unlock_irqrestore(&cwq->lock, flags);
191}
192
193/**
194 * queue_work - queue work on a workqueue
195 * @wq: workqueue to use
196 * @work: work to queue
197 *
198 * Returns 0 if @work was already on a queue, non-zero otherwise.
199 *
 200 * We queue the work to the CPU on which it was submitted, but there is no
 201 * guarantee that it will be processed by that CPU.
202 */
203int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
204{
205 int ret = 0, cpu = get_cpu();
206
207 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
208 if (unlikely(is_single_threaded(wq)))
209 cpu = singlethread_cpu;
210 BUG_ON(!list_empty(&work->entry));
211 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
212 ret = 1;
213 }
214 put_cpu();
215 return ret;
216}
217EXPORT_SYMBOL_GPL(queue_work);
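/*
 * A minimal usage sketch for the interface above (hypothetical handler,
 * device structure and workqueue name; the pattern assumes the 2.6-era
 * API from <linux/workqueue.h>, where handlers receive the work_struct
 * pointer and recover their context with container_of()):
 *
 *	struct my_dev {
 *		struct workqueue_struct *wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		// runs in process context, may sleep
 *	}
 *
 *	// setup (may sleep):
 *	dev->wq = create_workqueue("my_wq");
 *	INIT_WORK(&dev->work, my_work_handler);
 *
 *	// submission (safe from interrupt context):
 *	queue_work(dev->wq, &dev->work);
 */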
218
219void delayed_work_timer_fn(unsigned long __data)
220{
221 struct delayed_work *dwork = (struct delayed_work *)__data;
222 struct workqueue_struct *wq = get_wq_data(&dwork->work);
223 int cpu = smp_processor_id();
224
225 if (unlikely(is_single_threaded(wq)))
226 cpu = singlethread_cpu;
227
228 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
229}
230
231/**
232 * queue_delayed_work - queue work on a workqueue after delay
233 * @wq: workqueue to use
234 * @dwork: delayable work to queue
235 * @delay: number of jiffies to wait before queueing
236 *
237 * Returns 0 if @work was already on a queue, non-zero otherwise.
238 */
239int fastcall queue_delayed_work(struct workqueue_struct *wq,
240 struct delayed_work *dwork, unsigned long delay)
241{
242 int ret = 0;
243 struct timer_list *timer = &dwork->timer;
244 struct work_struct *work = &dwork->work;
245
246 timer_stats_timer_set_start_info(timer);
247 if (delay == 0)
248 return queue_work(wq, work);
249
250 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
251 BUG_ON(timer_pending(timer));
252 BUG_ON(!list_empty(&work->entry));
253
254 /* This stores wq for the moment, for the timer_fn */
255 set_wq_data(work, wq);
256 timer->expires = jiffies + delay;
257 timer->data = (unsigned long)dwork;
258 timer->function = delayed_work_timer_fn;
259 add_timer(timer);
260 ret = 1;
261 }
262 return ret;
263}
264EXPORT_SYMBOL_GPL(queue_delayed_work);
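/*
 * A usage sketch for delayed work (queue, work item and handler names are
 * hypothetical; the delayed_work must have been set up with
 * DECLARE_DELAYED_WORK() or INIT_DELAYED_WORK() before first use):
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	// run after ~1 second
 *	...
 *	cancel_delayed_work(&my_dwork);			// stop a pending timer
 *	flush_workqueue(my_wq);				// wait out a queued run
 */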
265
266/**
267 * queue_delayed_work_on - queue work on specific CPU after delay
268 * @cpu: CPU number to execute work on
269 * @wq: workqueue to use
270 * @dwork: work to queue
271 * @delay: number of jiffies to wait before queueing
272 *
273 * Returns 0 if @work was already on a queue, non-zero otherwise.
274 */
275int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
276 struct delayed_work *dwork, unsigned long delay)
277{
278 int ret = 0;
279 struct timer_list *timer = &dwork->timer;
280 struct work_struct *work = &dwork->work;
281
282 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
283 BUG_ON(timer_pending(timer));
284 BUG_ON(!list_empty(&work->entry));
285
286 /* This stores wq for the moment, for the timer_fn */
287 set_wq_data(work, wq);
288 timer->expires = jiffies + delay;
289 timer->data = (unsigned long)dwork;
290 timer->function = delayed_work_timer_fn;
291 add_timer_on(timer, cpu);
292 ret = 1;
293 }
294 return ret;
295}
296EXPORT_SYMBOL_GPL(queue_delayed_work_on);
297
298static void run_workqueue(struct cpu_workqueue_struct *cwq)
299{
300 unsigned long flags;
301
302 /*
303 * Keep taking off work from the queue until
304 * done.
305 */
306 spin_lock_irqsave(&cwq->lock, flags);
307 cwq->run_depth++;
308 if (cwq->run_depth > 3) {
309 /* morton gets to eat his hat */
310 printk("%s: recursion depth exceeded: %d\n",
311 __FUNCTION__, cwq->run_depth);
312 dump_stack();
313 }
314 while (!list_empty(&cwq->worklist)) {
315 struct work_struct *work = list_entry(cwq->worklist.next,
316 struct work_struct, entry);
317 work_func_t f = work->func;
318
319 cwq->current_work = work;
320 list_del_init(cwq->worklist.next);
321 spin_unlock_irqrestore(&cwq->lock, flags);
322
323 BUG_ON(get_wq_data(work) != cwq);
324 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
325 work_release(work);
326 f(work);
327
328 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
329 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
330 "%s/0x%08x/%d\n",
331 current->comm, preempt_count(),
332 current->pid);
333 printk(KERN_ERR " last function: ");
334 print_symbol("%s\n", (unsigned long)f);
335 debug_show_held_locks(current);
336 dump_stack();
337 }
338
339 spin_lock_irqsave(&cwq->lock, flags);
340 cwq->current_work = NULL;
341 }
342 cwq->run_depth--;
343 spin_unlock_irqrestore(&cwq->lock, flags);
344}
345
346static int worker_thread(void *__cwq)
347{
348 struct cpu_workqueue_struct *cwq = __cwq;
349 DECLARE_WAITQUEUE(wait, current);
350 struct k_sigaction sa;
351 sigset_t blocked;
352
353 if (!cwq->wq->freezeable)
354 current->flags |= PF_NOFREEZE;
355
356 set_user_nice(current, -5);
357
358 /* Block and flush all signals */
359 sigfillset(&blocked);
360 sigprocmask(SIG_BLOCK, &blocked, NULL);
361 flush_signals(current);
362
363 /*
364 * We inherited MPOL_INTERLEAVE from the booting kernel.
 365 * Set MPOL_DEFAULT to ensure node local allocations.
366 */
367 numa_default_policy();
368
369 /* SIG_IGN makes children autoreap: see do_notify_parent(). */
370 sa.sa.sa_handler = SIG_IGN;
371 sa.sa.sa_flags = 0;
372 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
373 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
374
375 set_current_state(TASK_INTERRUPTIBLE);
376 while (!kthread_should_stop()) {
377 if (cwq->wq->freezeable)
378 try_to_freeze();
379
380 add_wait_queue(&cwq->more_work, &wait);
381 if (list_empty(&cwq->worklist))
382 schedule();
383 else
384 __set_current_state(TASK_RUNNING);
385 remove_wait_queue(&cwq->more_work, &wait);
386
387 if (!list_empty(&cwq->worklist))
388 run_workqueue(cwq);
389 set_current_state(TASK_INTERRUPTIBLE);
390 }
391 __set_current_state(TASK_RUNNING);
392 return 0;
393}
394
395struct wq_barrier {
396 struct work_struct work;
397 struct completion done;
398};
399
400static void wq_barrier_func(struct work_struct *work)
401{
402 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
403 complete(&barr->done);
404}
405
406static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
407 struct wq_barrier *barr, int tail)
408{
409 INIT_WORK(&barr->work, wq_barrier_func);
410 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
411
412 init_completion(&barr->done);
413
414 insert_work(cwq, &barr->work, tail);
415}
416
417static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
418{
419 if (cwq->thread == current) {
420 /*
421 * Probably keventd trying to flush its own queue. So simply run
422 * it by hand rather than deadlocking.
423 */
424 preempt_enable();
425 /*
426 * We can still touch *cwq here because we are keventd, and
 427 * hot-unplug will be waiting for us to exit.
428 */
429 run_workqueue(cwq);
430 preempt_disable();
431 } else {
432 struct wq_barrier barr;
433 int active = 0;
434
435 spin_lock_irq(&cwq->lock);
436 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
437 insert_wq_barrier(cwq, &barr, 1);
438 active = 1;
439 }
440 spin_unlock_irq(&cwq->lock);
441
442 if (active) {
443 preempt_enable();
444 wait_for_completion(&barr.done);
445 preempt_disable();
446 }
447 }
448}
449
450/**
451 * flush_workqueue - ensure that any scheduled work has run to completion.
452 * @wq: workqueue to flush
453 *
454 * Forces execution of the workqueue and blocks until its completion.
455 * This is typically used in driver shutdown handlers.
456 *
 457 * We sleep until all work items which were queued on entry have been
 458 * handled, but we are not livelocked by new incoming ones.
459 *
460 * This function used to run the workqueues itself. Now we just wait for the
461 * helper threads to do it.
462 */
463void fastcall flush_workqueue(struct workqueue_struct *wq)
464{
465 preempt_disable(); /* CPU hotplug */
466 if (is_single_threaded(wq)) {
467 /* Always use first cpu's area. */
468 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
469 } else {
470 int cpu;
471
472 for_each_online_cpu(cpu)
473 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
474 }
475 preempt_enable();
476}
477EXPORT_SYMBOL_GPL(flush_workqueue);
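/*
 * A sketch of the driver-shutdown pattern mentioned above (hypothetical
 * device structure and flag): stop new submissions first, then flush so
 * that already-queued work has finished before the data it uses goes away.
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		dev->stopping = 1;		// submitters check this flag
 *		flush_workqueue(dev->wq);	// wait for queued work to finish
 *		destroy_workqueue(dev->wq);	// flushes once more, then frees
 *	}
 */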
478
479static void wait_on_work(struct cpu_workqueue_struct *cwq,
480 struct work_struct *work)
481{
482 struct wq_barrier barr;
483 int running = 0;
484
485 spin_lock_irq(&cwq->lock);
486 if (unlikely(cwq->current_work == work)) {
487 insert_wq_barrier(cwq, &barr, 0);
488 running = 1;
489 }
490 spin_unlock_irq(&cwq->lock);
491
492 if (unlikely(running)) {
493 mutex_unlock(&workqueue_mutex);
494 wait_for_completion(&barr.done);
495 mutex_lock(&workqueue_mutex);
496 }
497}
498
499/**
500 * flush_work - block until a work_struct's callback has terminated
501 * @wq: the workqueue on which the work is queued
502 * @work: the work which is to be flushed
503 *
504 * flush_work() will attempt to cancel the work if it is queued. If the work's
505 * callback appears to be running, flush_work() will block until it has
506 * completed.
507 *
508 * flush_work() is designed to be used when the caller is tearing down data
509 * structures which the callback function operates upon. It is expected that,
510 * prior to calling flush_work(), the caller has arranged for the work to not
511 * be requeued.
512 */
513void flush_work(struct workqueue_struct *wq, struct work_struct *work)
514{
515 struct cpu_workqueue_struct *cwq;
516
517 mutex_lock(&workqueue_mutex);
518 cwq = get_wq_data(work);
 519 /* Was it ever queued? */
520 if (!cwq)
521 goto out;
522
523 /*
524 * This work can't be re-queued, and the lock above protects us
525 * from take_over_work(), no need to re-check that get_wq_data()
526 * is still the same when we take cwq->lock.
527 */
528 spin_lock_irq(&cwq->lock);
529 list_del_init(&work->entry);
530 work_release(work);
531 spin_unlock_irq(&cwq->lock);
532
533 if (is_single_threaded(wq)) {
534 /* Always use first cpu's area. */
535 wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
536 } else {
537 int cpu;
538
539 for_each_online_cpu(cpu)
540 wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
541 }
542out:
543 mutex_unlock(&workqueue_mutex);
544}
545EXPORT_SYMBOL_GPL(flush_work);
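/*
 * A teardown sketch matching the contract above (hypothetical names): the
 * caller first guarantees the work item cannot be requeued, then flushes
 * just that item rather than the whole queue.
 *
 *	dev->stopping = 1;			// handler checks this, won't requeue
 *	flush_work(dev->wq, &dev->work);	// wait for a queued/running callback
 *	kfree(dev->buffer);			// now safe to free what the handler used
 */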
546
547static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
548 int cpu)
549{
550 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
551 struct task_struct *p;
552
553 spin_lock_init(&cwq->lock);
554 cwq->wq = wq;
555 cwq->thread = NULL;
556 INIT_LIST_HEAD(&cwq->worklist);
557 init_waitqueue_head(&cwq->more_work);
558
559 if (is_single_threaded(wq))
560 p = kthread_create(worker_thread, cwq, "%s", wq->name);
561 else
562 p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
563 if (IS_ERR(p))
564 return NULL;
565 cwq->thread = p;
566 return p;
567}
568
569struct workqueue_struct *__create_workqueue(const char *name,
570 int singlethread, int freezeable)
571{
572 int cpu, destroy = 0;
573 struct workqueue_struct *wq;
574 struct task_struct *p;
575
576 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
577 if (!wq)
578 return NULL;
579
580 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
581 if (!wq->cpu_wq) {
582 kfree(wq);
583 return NULL;
584 }
585
586 wq->name = name;
587 wq->freezeable = freezeable;
588
589 mutex_lock(&workqueue_mutex);
590 if (singlethread) {
591 INIT_LIST_HEAD(&wq->list);
592 p = create_workqueue_thread(wq, singlethread_cpu);
593 if (!p)
594 destroy = 1;
595 else
596 wake_up_process(p);
597 } else {
598 list_add(&wq->list, &workqueues);
599 for_each_online_cpu(cpu) {
600 p = create_workqueue_thread(wq, cpu);
601 if (p) {
602 kthread_bind(p, cpu);
603 wake_up_process(p);
604 } else
605 destroy = 1;
606 }
607 }
608 mutex_unlock(&workqueue_mutex);
609
610 /*
611 * Was there any error during startup? If yes then clean up:
612 */
613 if (destroy) {
614 destroy_workqueue(wq);
615 wq = NULL;
616 }
617 return wq;
618}
619EXPORT_SYMBOL_GPL(__create_workqueue);
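/*
 * __create_workqueue() is normally reached through the wrappers declared
 * in <linux/workqueue.h>; a minimal sketch of those variants in this
 * kernel generation (the queue name is illustrative), picking the one
 * that matches the desired threading/freezing behaviour:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("my_wq");			// one worker per CPU
 *	// wq = create_singlethread_workqueue("my_wq");	// a single worker
 *	// wq = create_freezeable_workqueue("my_wq");	// freezes for suspend
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */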
620
621static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
622{
623 struct cpu_workqueue_struct *cwq;
624 unsigned long flags;
625 struct task_struct *p;
626
627 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
628 spin_lock_irqsave(&cwq->lock, flags);
629 p = cwq->thread;
630 cwq->thread = NULL;
631 spin_unlock_irqrestore(&cwq->lock, flags);
632 if (p)
633 kthread_stop(p);
634}
635
636/**
637 * destroy_workqueue - safely terminate a workqueue
638 * @wq: target workqueue
639 *
640 * Safely destroy a workqueue. All work currently pending will be done first.
641 */
642void destroy_workqueue(struct workqueue_struct *wq)
643{
644 int cpu;
645
646 flush_workqueue(wq);
647
648 /* We don't need the distraction of CPUs appearing and vanishing. */
649 mutex_lock(&workqueue_mutex);
650 if (is_single_threaded(wq))
651 cleanup_workqueue_thread(wq, singlethread_cpu);
652 else {
653 for_each_online_cpu(cpu)
654 cleanup_workqueue_thread(wq, cpu);
655 list_del(&wq->list);
656 }
657 mutex_unlock(&workqueue_mutex);
658 free_percpu(wq->cpu_wq);
659 kfree(wq);
660}
661EXPORT_SYMBOL_GPL(destroy_workqueue);
662
663static struct workqueue_struct *keventd_wq;
664
665/**
666 * schedule_work - put work task in global workqueue
667 * @work: job to be done
668 *
669 * This puts a job in the kernel-global workqueue.
670 */
671int fastcall schedule_work(struct work_struct *work)
672{
673 return queue_work(keventd_wq, work);
674}
675EXPORT_SYMBOL(schedule_work);
676
677/**
678 * schedule_delayed_work - put work task in global workqueue after delay
679 * @dwork: job to be done
680 * @delay: number of jiffies to wait or 0 for immediate execution
681 *
682 * After waiting for a given time this puts a job in the kernel-global
683 * workqueue.
684 */
685int fastcall schedule_delayed_work(struct delayed_work *dwork,
686 unsigned long delay)
687{
688 timer_stats_timer_set_start_info(&dwork->timer);
689 return queue_delayed_work(keventd_wq, dwork, delay);
690}
691EXPORT_SYMBOL(schedule_delayed_work);
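/*
 * A sketch of using the shared keventd ("events") queue instead of a
 * private workqueue (handler and work items are hypothetical):
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	schedule_work(&my_work);			// run soon in keventd
 *	schedule_delayed_work(&my_dwork, 5 * HZ);	// run after ~5 seconds
 */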
692
693/**
694 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
695 * @cpu: cpu to use
696 * @dwork: job to be done
697 * @delay: number of jiffies to wait
698 *
699 * After waiting for a given time this puts a job in the kernel-global
700 * workqueue on the specified CPU.
701 */
702int schedule_delayed_work_on(int cpu,
703 struct delayed_work *dwork, unsigned long delay)
704{
705 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
706}
707EXPORT_SYMBOL(schedule_delayed_work_on);
708
709/**
710 * schedule_on_each_cpu - call a function on each online CPU from keventd
711 * @func: the function to call
712 *
713 * Returns zero on success.
714 * Returns -ve errno on failure.
715 *
716 * Appears to be racy against CPU hotplug.
717 *
718 * schedule_on_each_cpu() is very slow.
719 */
720int schedule_on_each_cpu(work_func_t func)
721{
722 int cpu;
723 struct work_struct *works;
724
725 works = alloc_percpu(struct work_struct);
726 if (!works)
727 return -ENOMEM;
728
729 preempt_disable(); /* CPU hotplug */
730 for_each_online_cpu(cpu) {
731 struct work_struct *work = per_cpu_ptr(works, cpu);
732
733 INIT_WORK(work, func);
734 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
735 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
736 }
737 preempt_enable();
738 flush_workqueue(keventd_wq);
739 free_percpu(works);
740 return 0;
741}
742
743void flush_scheduled_work(void)
744{
745 flush_workqueue(keventd_wq);
746}
747EXPORT_SYMBOL(flush_scheduled_work);
748
749void flush_work_keventd(struct work_struct *work)
750{
751 flush_work(keventd_wq, work);
752}
753EXPORT_SYMBOL(flush_work_keventd);
754
755/**
756 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
757 * @wq: the controlling workqueue structure
758 * @dwork: the delayed work struct
759 */
760void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
761 struct delayed_work *dwork)
762{
763 while (!cancel_delayed_work(dwork))
764 flush_workqueue(wq);
765}
766EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
767
768/**
769 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
770 * @dwork: the delayed work struct
771 */
772void cancel_rearming_delayed_work(struct delayed_work *dwork)
773{
774 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
775}
776EXPORT_SYMBOL(cancel_rearming_delayed_work);
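/*
 * A sketch of a self-rearming delayed work and its teardown (names are
 * hypothetical): a plain cancel_delayed_work() can lose the race against
 * a handler that requeues itself, so the helpers above keep cancelling
 * and flushing until the cancel sticks.
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		// ... periodic work ...
 *		schedule_delayed_work(&my_dwork, HZ);	// rearm
 *	}
 *
 *	// on module unload:
 *	cancel_rearming_delayed_work(&my_dwork);
 */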
777
778/**
779 * execute_in_process_context - reliably execute the routine with user context
780 * @fn: the function to execute
781 * @ew: guaranteed storage for the execute work structure (must
782 * be available when the work executes)
783 *
784 * Executes the function immediately if process context is available,
785 * otherwise schedules the function for delayed execution.
786 *
787 * Returns: 0 - function was executed
788 * 1 - function was scheduled for execution
789 */
790int execute_in_process_context(work_func_t fn, struct execute_work *ew)
791{
792 if (!in_interrupt()) {
793 fn(&ew->work);
794 return 0;
795 }
796
797 INIT_WORK(&ew->work, fn);
798 schedule_work(&ew->work);
799
800 return 1;
801}
802EXPORT_SYMBOL_GPL(execute_in_process_context);
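/*
 * A usage sketch (hypothetical object and helper): a release path that may
 * run in interrupt context defers the real cleanup via
 * execute_in_process_context(); the execute_work storage must stay valid
 * until the handler runs, so it is embedded in the object being freed.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		// ...
 *	};
 *
 *	static void my_obj_cleanup(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	// safe from any context:
 *	execute_in_process_context(my_obj_cleanup, &obj->ew);
 */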
803
804int keventd_up(void)
805{
806 return keventd_wq != NULL;
807}
808
809int current_is_keventd(void)
810{
811 struct cpu_workqueue_struct *cwq;
812 int cpu = smp_processor_id(); /* preempt-safe: keventd is per-cpu */
813 int ret = 0;
814
815 BUG_ON(!keventd_wq);
816
817 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
818 if (current == cwq->thread)
819 ret = 1;
820
821 return ret;
822
823}
824
825/* Take the work from this (downed) CPU. */
826static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
827{
828 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
829 struct list_head list;
830 struct work_struct *work;
831
832 spin_lock_irq(&cwq->lock);
833 list_replace_init(&cwq->worklist, &list);
834
835 while (!list_empty(&list)) {
836 printk("Taking work for %s\n", wq->name);
 837 work = list_entry(list.next, struct work_struct, entry);
838 list_del(&work->entry);
839 __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
840 }
841 spin_unlock_irq(&cwq->lock);
842}
843
844/* We're holding the cpucontrol mutex here */
845static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
846 unsigned long action,
847 void *hcpu)
848{
849 unsigned int hotcpu = (unsigned long)hcpu;
850 struct workqueue_struct *wq;
851
852 switch (action) {
853 case CPU_UP_PREPARE:
854 mutex_lock(&workqueue_mutex);
855 /* Create a new workqueue thread for it. */
856 list_for_each_entry(wq, &workqueues, list) {
857 if (!create_workqueue_thread(wq, hotcpu)) {
858 printk("workqueue for %i failed\n", hotcpu);
859 return NOTIFY_BAD;
860 }
861 }
862 break;
863
864 case CPU_ONLINE:
865 /* Kick off worker threads. */
866 list_for_each_entry(wq, &workqueues, list) {
867 struct cpu_workqueue_struct *cwq;
868
869 cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
870 kthread_bind(cwq->thread, hotcpu);
871 wake_up_process(cwq->thread);
872 }
873 mutex_unlock(&workqueue_mutex);
874 break;
875
876 case CPU_UP_CANCELED:
877 list_for_each_entry(wq, &workqueues, list) {
878 if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
879 continue;
880 /* Unbind so it can run. */
881 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
882 any_online_cpu(cpu_online_map));
883 cleanup_workqueue_thread(wq, hotcpu);
884 }
885 mutex_unlock(&workqueue_mutex);
886 break;
887
888 case CPU_DOWN_PREPARE:
889 mutex_lock(&workqueue_mutex);
890 break;
891
892 case CPU_DOWN_FAILED:
893 mutex_unlock(&workqueue_mutex);
894 break;
895
896 case CPU_DEAD:
897 list_for_each_entry(wq, &workqueues, list)
898 cleanup_workqueue_thread(wq, hotcpu);
899 list_for_each_entry(wq, &workqueues, list)
900 take_over_work(wq, hotcpu);
901 mutex_unlock(&workqueue_mutex);
902 break;
903 }
904
905 return NOTIFY_OK;
906}
907
908void init_workqueues(void)
909{
910 singlethread_cpu = first_cpu(cpu_possible_map);
911 hotcpu_notifier(workqueue_cpu_callback, 0);
912 keventd_wq = create_workqueue("events");
913 BUG_ON(!keventd_wq);
914}
915