/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
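
/*
 * Worked example of the counters above (illustrative numbers only): if
 * a flusher samples insert_sequence == 5 while remove_sequence == 2,
 * it sleeps until remove_sequence reaches 5.  Works queued meanwhile
 * may push insert_sequence to 8, but the flusher never waits for
 * those, which is what prevents the livelock described above.
 */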

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue.  Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
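
/*
 * Usage sketch (the my_* names are hypothetical, invented for this
 * example): declare a work item once, then queue it.  queue_work()
 * returns 0 if the item was still pending from an earlier call.
 */
#if 0
static void my_handler(void *data)
{
        printk(KERN_DEBUG "my_handler: data=%p\n", data);
}

static DECLARE_WORK(my_work, my_handler, NULL);

static void my_submit(struct workqueue_struct *my_wq)
{
        if (!queue_work(my_wq, &my_work))
                printk(KERN_DEBUG "my_work was already pending\n");
}
#endif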

static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
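
/*
 * Delayed-queueing sketch (hypothetical my_* names): arm my_tick_work
 * to run roughly one second from now.  Note that the timer lives
 * inside the work_struct itself here, so a given work item cannot be
 * simultaneously queued and delay-armed; the pending bit guards both.
 */
#if 0
static void my_tick(void *data)
{
        /* runs in a worker thread, ~HZ jiffies after being armed */
}

static DECLARE_WORK(my_tick_work, my_tick, NULL);

static void my_arm(struct workqueue_struct *my_wq)
{
        queue_delayed_work(my_wq, &my_tick_work, HZ);
}
#endif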

int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue.  So simply
                 * run it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number
 * and will sleep until the remove_sequence is greater than or equal to that.
 * This means that we sleep until all works which were queued on entry have
 * been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                lock_cpu_hotplug();
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                unlock_cpu_hotplug();
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);
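
/*
 * Typical shutdown-path sketch (all my_* names are hypothetical and
 * exist only in this example): stop producing new work, then drain
 * whatever was already queued before tearing anything down.
 */
#if 0
struct my_driver {
        int dying;                      /* checked by the work handlers */
        struct workqueue_struct *wq;
};

static void my_driver_stop(struct my_driver *drv)
{
        drv->dying = 1;                 /* no new work gets queued past here */
        flush_workqueue(drv->wq);       /* wait out already-queued works */
        /* handlers queued before the flush have now all completed */
}
#endif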

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
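
/*
 * Creation/teardown sketch (hypothetical my_* names): callers normally
 * use the create_workqueue()/create_singlethread_workqueue() wrappers
 * from <linux/workqueue.h> rather than __create_workqueue() directly.
 */
#if 0
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;
        return 0;
}

static void __exit my_exit(void)
{
        /* destroy_workqueue() flushes pending work before freeing */
        destroy_workqueue(my_wq);
}
#endif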

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
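
/*
 * keventd sketch (hypothetical my_* names): most callers defer work to
 * the shared "events" threads via schedule_work() instead of creating
 * a private workqueue, e.g. to get out of interrupt context.
 */
#if 0
static void my_deferred(void *data)
{
        /* runs later, in keventd's process context */
}

static DECLARE_WORK(my_deferred_work, my_deferred, NULL);

static void my_isr_bottom(void)
{
        /* e.g. called from an interrupt handler */
        schedule_work(&my_deferred_work);
}
#endif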

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

int schedule_delayed_work_on(int cpu,
                        struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        for_each_online_cpu(cpu) {
                INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
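
/*
 * schedule_on_each_cpu() sketch (hypothetical my_* names): run a
 * function once on every online CPU, in keventd process context, and
 * wait for all of them, e.g. to drain per-cpu caches.
 */
#if 0
static void my_drain_cpu(void *info)
{
        /* executes on one particular CPU, in process context */
}

static int my_drain_all(void)
{
        return schedule_on_each_cpu(my_drain_cpu, NULL); /* 0 or -ENOMEM */
}
#endif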

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        while (!cancel_delayed_work(work))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
        cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
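
/*
 * The rearming pattern these helpers exist for (hypothetical my_*
 * names): because the handler requeues itself, a single
 * cancel_delayed_work() can race with the requeue; the cancel/flush
 * loop above cannot.
 */
#if 0
static void my_poll(void *data);
static DECLARE_WORK(my_poll_work, my_poll, NULL);

static void my_poll(void *data)
{
        /* ... poll the hardware ... */
        schedule_delayed_work(&my_poll_work, HZ);       /* rearm */
}

static void my_stop_polling(void)
{
        cancel_rearming_delayed_work(&my_poll_work);
}
#endif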

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
                               struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(data);
                return 0;
        }

        INIT_WORK(&ew->work, fn, data);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
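
/*
 * execute_in_process_context() sketch (hypothetical my_* names): the
 * callback runs inline when the caller already has process context and
 * is deferred to keventd otherwise.  The execute_work storage must
 * outlive the deferred call, so it cannot live on the caller's stack.
 */
#if 0
static struct execute_work my_ew;

static void my_final_put(void *data)
{
        /* always entered in process context */
}

static void my_release(void *data)
{
        /* may be called from either interrupt or process context */
        execute_in_process_context(my_final_put, data, &my_ew);
}
#endif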

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}