#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

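/*
 * Example (illustrative sketch, not part of the API; the names below are
 * hypothetical): a statically declared wait queue head paired with a flag
 * that the waiting and waking sides agree on.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static int foo_ready;
 *
 *	// waiter side
 *	wait_event(foo_wq, foo_ready);
 *
 *	// producer side
 *	foo_ready = 1;
 *	wake_up(&foo_wq);
 */
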
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

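/*
 * Example (illustrative sketch): a wait queue head embedded in a dynamically
 * allocated object must be initialised at runtime. The structure and field
 * names here are hypothetical.
 *
 *	struct foo_dev {
 *		wait_queue_head_t wq;
 *		bool done;
 *	};
 *
 *	static int foo_setup(struct foo_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		dev->done = false;
 *		return 0;
 *	}
 */
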
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

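/*
 * Illustrative sketch (a common usage pattern, not a rule imposed by this
 * header): waitqueue_active() is sometimes used to skip the wake-up path
 * when nobody is waiting. Doing so safely needs a full memory barrier so
 * the condition store cannot be reordered past the emptiness check:
 *
 *	cond = true;
 *	smp_mb();
 *	if (waitqueue_active(&wq))
 *		wake_up(&wq);
 *
 * When in doubt, calling wake_up() unconditionally is always correct.
 */
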
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

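/*
 * Example (illustrative sketch, hypothetical names): the wake-up side
 * updates the shared condition first and then wakes the queue, matching
 * the re-check performed by the wait_event*() family below.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(rx_wq);
 *	static unsigned int rx_count;
 *
 *	static void rx_event(void)
 *	{
 *		rx_count++;			// update the condition ...
 *		wake_up_interruptible(&rx_wq);	// ... then wake the waiters
 *	}
 */
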
/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
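
/*
 * Example (illustrative sketch, hypothetical names): block until another
 * context sets the flag and calls wake_up() on the same queue.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(done_wq);
 *	static bool done;
 *
 *	// waiter (not woken by signals)
 *	wait_event(done_wq, done);
 *
 *	// completer
 *	done = true;
 *	wake_up(&done_wq);
 */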

#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})
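
/*
 * Example (illustrative sketch, hypothetical names): wait up to one second
 * for a completion flag, distinguishing timeout from success.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(done_wq, done, msecs_to_jiffies(1000));
 *	if (!remaining)
 *		return -ETIMEDOUT;	// condition still false after 1 s
 *	// condition became true with 'remaining' jiffies to spare
 */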

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
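
/*
 * Example (illustrative sketch, hypothetical names): a read path that
 * sleeps interruptibly and propagates -ERESTARTSYS to the caller when a
 * signal arrives before data is available.
 *
 *	int err;
 *
 *	err = wait_event_interruptible(rx_wq, rx_count > 0);
 *	if (err)
 *		return err;	// -ERESTARTSYS: interrupted by a signal
 *	// rx_count > 0 here; proceed to consume the data
 */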

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout).tv64 != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, state); \
		if (condition) \
			break; \
		if (state == TASK_INTERRUPTIBLE && \
		    signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule(); \
	} \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	finish_wait(&wq, &__wait); \
	__ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})
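
/*
 * Example (illustrative sketch, hypothetical names): wait on a condition
 * with a high-resolution 500 us timeout expressed as a ktime_t.
 *
 *	int err;
 *
 *	err = wait_event_hrtimeout(done_wq, done,
 *				   ktime_set(0, 500 * NSEC_PER_USEC));
 *	if (err == -ETIME)
 *		return err;	// condition still false when the timer fired
 */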

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					  TASK_INTERRUPTIBLE); \
		if (condition) { \
			finish_wait(&wq, &__wait); \
			break; \
		} \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		abort_exclusive_wait(&wq, &__wait, \
				     TASK_INTERRUPTIBLE, NULL); \
		break; \
	} \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret; \
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

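/*
 * Example (illustrative sketch, hypothetical names): the caller already
 * holds wq.lock via spin_lock(), and the wake-up side uses
 * wake_up_locked() under the same lock.
 *
 *	spin_lock(&dev->wq.lock);
 *	err = wait_event_interruptible_locked(dev->wq, dev->count > 0);
 *	if (!err)
 *		dev->count--;		// condition checked under the lock
 *	spin_unlock(&dev->wq.lock);
 */
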
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})
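
/*
 * Example (illustrative sketch, hypothetical names): TASK_KILLABLE sleeps
 * like TASK_UNINTERRUPTIBLE, except that a fatal signal (e.g. SIGKILL)
 * can still wake the task, so userspace is never stuck unkillably.
 *
 *	int err;
 *
 *	err = wait_event_killable(done_wq, done);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a fatal signal arrived
 */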

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)

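/*
 * Example (illustrative sketch, hypothetical names): wait with the
 * IRQ-safe spinlock held; the lock is dropped across schedule() and
 * reacquired before @condition is re-tested.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, !list_empty(&dev->queue), dev->lock);
 *	// dev->lock held and queue non-empty here
 *	spin_unlock_irq(&dev->lock);
 */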

#define __wait_event_interruptible_lock_irq(wq, condition, \
					    lock, ret, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, ); \
	__ret; \
})

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

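/*
 * Example (illustrative sketch, hypothetical names): the open-coded wait
 * loop that the wait_event*() macros above expand to, useful when extra
 * work is needed between the condition check and schedule().
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 *		if (dev->data_ready)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&dev->wq, &wait);
 */
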
1da177e4
LT
835#define DEFINE_WAIT_BIT(name, word, bit) \
836 struct wait_bit_queue name = { \
837 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
838 .wait = { \
c43dc2fd 839 .private = current, \
1da177e4
LT
840 .func = wake_bit_function, \
841 .task_list = \
842 LIST_HEAD_INIT((name).wait.task_list), \
843 }, \
844 }
845
846#define init_wait(wait) \
847 do { \
c43dc2fd 848 (wait)->private = current; \
1da177e4
LT
849 (wait)->func = autoremove_wake_function; \
850 INIT_LIST_HEAD(&(wait)->task_list); \
231d0aef 851 (wait)->flags = 0; \
1da177e4
LT
852 } while (0)
853
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that wait for the bit to clear but
 * have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

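/*
 * Example (illustrative sketch; the bit index, flags word, action function
 * and barrier choice are assumptions, not part of this API): wait until
 * another context clears MY_FLAG_BUSY in dev->flags and calls wake_up_bit().
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();	// simply sleep until woken
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_bit(&dev->flags, MY_FLAG_BUSY, my_bit_wait,
 *		    TASK_UNINTERRUPTIBLE);
 *
 *	// the side that clears the bit
 *	clear_bit(MY_FLAG_BUSY, &dev->flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&dev->flags, MY_FLAG_BUSY);
 */
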
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * Use wait_on_bit_lock() in threads that wait for the bit to clear
 * with the intention of setting it, and of clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

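/*
 * Example (illustrative sketch; names as in the previous example are
 * hypothetical): use the bit as a sleeping lock. wait_on_bit_lock()
 * returns with the bit set and owned by the caller, who later clears it
 * and wakes the next waiter.
 *
 *	wait_on_bit_lock(&dev->flags, MY_FLAG_BUSY, my_bit_wait,
 *			 TASK_UNINTERRUPTIBLE);
 *	// ... exclusive section, MY_FLAG_BUSY is set ...
 *	clear_bit(MY_FLAG_BUSY, &dev->flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&dev->flags, MY_FLAG_BUSY);
 */
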
#endif