#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02

struct __wait_queue {
        unsigned int flags;
        void *private;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct wait_bit_key {
        void *flags;
        int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
        unsigned long timeout;
};

struct wait_bit_queue {
        struct wait_bit_key key;
        wait_queue_t wait;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private = tsk, \
        .func = default_wake_function, \
        .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
        .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
        do { \
                static struct lock_class_key __key; \
                \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

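
/*
 * Example (illustrative sketch, not part of the original header): declaring
 * wait queue heads. A file-scope head can use DECLARE_WAIT_QUEUE_HEAD();
 * a head embedded in a dynamically allocated structure must be set up with
 * init_waitqueue_head(). The names my_dev and my_wq are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);		// static head
 *
 *	struct my_dev {
 *		wait_queue_head_t wq;			// embedded head
 *		bool ready;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);		// dynamic init
 *		dev->ready = false;
 *	}
 */
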
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->private = p;
        q->func = default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
        q->flags = 0;
        q->private = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
        list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

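
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * reporting readable data to poll()/select() waiters. Assumes POLLIN and
 * POLLRDNORM from <linux/poll.h>; the my_dev structure and its read_wq
 * member are hypothetical.
 *
 *	static void my_dev_data_arrived(struct my_dev *dev)
 *	{
 *		// wake only interruptible waiters and tell them why
 *		wake_up_interruptible_poll(&dev->read_wq,
 *					   POLLIN | POLLRDNORM);
 *	}
 */
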
#define ___wait_cond_timeout(condition) \
({ \
        bool __cond = (condition); \
        if (__cond && !__ret) \
                __ret = 1; \
        __cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
        (!__builtin_constant_p(state) || \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
        __label__ __out; \
        wait_queue_t __wait; \
        long __ret = ret; /* explicit shadow */ \
        \
        INIT_LIST_HEAD(&__wait.task_list); \
        if (exclusive) \
                __wait.flags = WQ_FLAG_EXCLUSIVE; \
        else \
                __wait.flags = 0; \
        \
        for (;;) { \
                long __int = prepare_to_wait_event(&wq, &__wait, state); \
                \
                if (condition) \
                        break; \
                \
                if (___wait_is_interruptible(state) && __int) { \
                        __ret = __int; \
                        if (exclusive) { \
                                abort_exclusive_wait(&wq, &__wait, \
                                                     state, NULL); \
                                goto __out; \
                        } \
                        break; \
                } \
                \
                cmd; \
        } \
        finish_wait(&wq, &__wait); \
__out:  __ret; \
})

#define __wait_event(wq, condition) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
        might_sleep(); \
        if (condition) \
                break; \
        __wait_event(wq, condition); \
} while (0)

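
/*
 * Example (illustrative sketch, not part of the original header): a simple
 * producer/consumer pair using wait_event()/wake_up(). The names my_wq and
 * my_data_ready are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_data_ready;
 *
 *	static void my_consumer(void)
 *	{
 *		// sleeps in TASK_UNINTERRUPTIBLE until the flag is set
 *		wait_event(my_wq, my_data_ready);
 *	}
 *
 *	static void my_producer(void)
 *	{
 *		my_data_ready = true;	// change the condition first ...
 *		wake_up(&my_wq);	// ... then wake the waiters
 *	}
 */
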
#define __wait_event_freezable(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable(wq, condition); \
        __ret; \
})

#define __wait_event_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_UNINTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_timeout(wq, condition, timeout); \
        __ret; \
})

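
/*
 * Example (illustrative sketch, not part of the original header): bounded
 * wait for a hardware completion flag. The names my_wq, my_done and
 * MY_POLL_JIFFIES are hypothetical.
 *
 *	static int my_wait_for_done(void)
 *	{
 *		long left;
 *
 *		// give up after the timeout; a zero return means "timed out"
 *		left = wait_event_timeout(my_wq, my_done, MY_POLL_JIFFIES);
 *		if (!left)
 *			return -ETIMEDOUT;
 *		return 0;	// condition became true, jiffies remained
 *	}
 */
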
#define __wait_event_freezable_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
        __ret; \
})

#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleeping
 * @cmd2: the command to be executed after sleeping
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)

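
/*
 * Example (illustrative sketch, not part of the original header): dropping
 * and retaking a lock around the sleep with wait_event_cmd(), so the waker
 * can take the lock while this thread sleeps. The condition is still
 * checked with the lock held. The names my_wq, my_lock and my_cond are
 * hypothetical.
 *
 *	spin_lock(&my_lock);
 *	wait_event_cmd(my_wq, my_cond,
 *		       spin_unlock(&my_lock),	// cmd1: before schedule()
 *		       spin_lock(&my_lock));	// cmd2: after schedule()
 *	// my_lock is held again here and my_cond was true when checked
 *	spin_unlock(&my_lock);
 */
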
#define __wait_event_interruptible(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible(wq, condition); \
        __ret; \
})

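
/*
 * Example (illustrative sketch, not part of the original header): a read()
 * style path that can be interrupted by a signal. The names my_wq and
 * my_have_data are hypothetical.
 *
 *	static int my_read_wait(void)
 *	{
 *		int err;
 *
 *		err = wait_event_interruptible(my_wq, my_have_data);
 *		if (err)
 *			return err;	// -ERESTARTSYS: a signal arrived
 *		// my_have_data was true when the condition was checked
 *		return 0;
 *	}
 */
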
#define __wait_event_interruptible_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_timeout(wq, \
                                condition, timeout); \
        __ret; \
})

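
/*
 * Example (illustrative sketch, not part of the original header): handling
 * all three outcomes of wait_event_interruptible_timeout(). The names
 * my_wq, my_ready and MY_TIMEOUT are hypothetical.
 *
 *	static int my_wait(void)
 *	{
 *		long ret;
 *
 *		ret = wait_event_interruptible_timeout(my_wq, my_ready,
 *						       MY_TIMEOUT);
 *		if (ret < 0)
 *			return ret;		// -ERESTARTSYS
 *		if (ret == 0)
 *			return -ETIMEDOUT;	// timed out, still not ready
 *		return 0;			// condition became true
 *	}
 */
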
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
        int __ret = 0; \
        struct hrtimer_sleeper __t; \
        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
                              HRTIMER_MODE_REL); \
        hrtimer_init_sleeper(&__t, current); \
        if ((timeout).tv64 != KTIME_MAX) \
                hrtimer_start_range_ns(&__t.timer, timeout, \
                                       current->timer_slack_ns, \
                                       HRTIMER_MODE_REL); \
        \
        __ret = ___wait_event(wq, condition, state, 0, 0, \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule()); \
        \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        __ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})

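
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * with sub-jiffy resolution using a ktime_t timeout. The names my_wq and
 * my_ready are hypothetical; ms_to_ktime() comes from <linux/ktime.h>.
 *
 *	static int my_wait_hr(void)
 *	{
 *		long ret;
 *
 *		// wait at most 10 ms
 *		ret = wait_event_interruptible_hrtimeout(my_wq, my_ready,
 *							 ms_to_ktime(10));
 *		if (ret == -ETIME)
 *			return -ETIMEDOUT;	// timed out
 *		if (ret)
 *			return ret;		// -ERESTARTSYS on signal
 *		return 0;			// condition became true
 *	}
 */
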
#define __wait_event_interruptible_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_freezable_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                if (likely(list_empty(&__wait.task_list))) \
                        __add_wait_queue_tail(&(wq), &__wait); \
                set_current_state(TASK_INTERRUPTIBLE); \
                if (signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (irq) \
                        spin_unlock_irq(&(wq).lock); \
                else \
                        spin_unlock(&(wq).lock); \
                schedule(); \
                if (irq) \
                        spin_lock_irq(&(wq).lock); \
                else \
                        spin_lock(&(wq).lock); \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

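
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * with the waitqueue's own lock held, using wq.lock to protect the shared
 * state. The names my_wq and my_count are hypothetical.
 *
 *	static int my_consume(void)
 *	{
 *		int err;
 *
 *		spin_lock_irq(&my_wq.lock);
 *		err = wait_event_interruptible_locked_irq(my_wq, my_count > 0);
 *		if (!err)
 *			my_count--;	// still under wq.lock here
 *		spin_unlock_irq(&my_wq.lock);
 *		return err;
 *	}
 *
 *	// the waker side uses the _locked wakeup:
 *	//	spin_lock_irq(&my_wq.lock);
 *	//	my_count++;
 *	//	wake_up_locked(&my_wq);
 *	//	spin_unlock_irq(&my_wq.lock);
 */
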
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if several processes are waiting on the list and this process
 * is woken up, further processes are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if several processes are waiting on the list and this process
 * is woken up, further processes are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

#define __wait_event_killable(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_killable(wq, condition); \
        __ret; \
})

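
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * so that only a fatal signal (e.g. SIGKILL) interrupts the sleep, which
 * avoids -ERESTARTSYS on harmless signals. The names my_wq and my_done
 * are hypothetical.
 *
 *	static int my_wait_killable(void)
 *	{
 *		if (wait_event_killable(my_wq, my_done))
 *			return -EINTR;	// a fatal signal is pending
 *		return 0;
 *	}
 */
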

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            spin_unlock_irq(&lock); \
                            cmd; \
                            schedule(); \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, ); \
} while (0)

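
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * for a list protected by a separate irq-safe spinlock to become non-empty.
 * The names my_wq, my_lock and my_list are hypothetical.
 *
 *	spin_lock_irq(&my_lock);
 *	// my_lock is dropped around schedule() and reacquired before
 *	// every check of the condition
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	// my_lock is held again here and the list is non-empty
 *	spin_unlock_irq(&my_lock);
 */
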
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      spin_unlock_irq(&lock); \
                      cmd; \
                      schedule(); \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq, \
                                condition, lock, cmd); \
        __ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq, \
                                condition, lock,); \
        __ret; \
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
                                                    lock, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      spin_unlock_irq(&lock); \
                      __ret = schedule_timeout(__ret); \
                      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies (at least 1)
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
                                                  timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_lock_irq_timeout( \
                                wq, condition, lock, timeout); \
        __ret; \
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        wait_queue_t name = { \
                .private = current, \
                .func = function, \
                .task_list = LIST_HEAD_INIT((name).task_list), \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

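
/*
 * Example (illustrative sketch, not part of the original header): the
 * open-coded wait loop that the wait_event*() macros wrap, using
 * DEFINE_WAIT()/prepare_to_wait()/finish_wait(). The names my_wq and
 * my_cond are hypothetical.
 *
 *	static void my_wait_open_coded(void)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (my_cond)
 *				break;
 *			schedule();
 *		}
 *		finish_wait(&my_wq, &wait);
 *	}
 */
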
#define DEFINE_WAIT_BIT(name, word, bit) \
        struct wait_bit_queue name = { \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
                .wait = { \
                        .private = current, \
                        .func = wake_bit_function, \
                        .task_list = \
                                LIST_HEAD_INIT((name).wait.task_list), \
                }, \
        }

#define init_wait(wait) \
        do { \
                (wait)->private = current; \
                (wait)->func = autoremove_wake_function; \
                INIT_LIST_HEAD(&(wait)->task_list); \
                (wait)->flags = 0; \
        } while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
                                       mode);
}

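
/*
 * Example (illustrative sketch, not part of the original header): using a
 * flag bit in an unsigned long as a "busy" marker. The waker must clear
 * the bit and then call wake_up_bit() on the same word/bit. The names
 * my_flags and MY_BIT_BUSY are hypothetical.
 *
 *	static unsigned long my_flags;
 *	#define MY_BIT_BUSY	0
 *
 *	static int my_wait_not_busy(void)
 *	{
 *		// returns non-zero if a signal woke us (TASK_INTERRUPTIBLE)
 *		return wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_INTERRUPTIBLE);
 *	}
 *
 *	static void my_clear_busy(void)
 *	{
 *		clear_bit(MY_BIT_BUSY, &my_flags);
 *		smp_mb__after_atomic();	// order the clear before the wakeup
 *		wake_up_bit(&my_flags, MY_BIT_BUSY);
 *	}
 */
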
/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
                                       mode);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in the threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

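
/*
 * Example (illustrative sketch, not part of the original header): a simple
 * bit-based lock built on wait_on_bit_lock()/wake_up_bit(). The names
 * my_state and MY_BIT_LOCK are hypothetical.
 *
 *	static unsigned long my_state;
 *	#define MY_BIT_LOCK	1
 *
 *	static void my_bit_lock(void)
 *	{
 *		// sleeps until we were the ones to atomically set the bit
 *		wait_on_bit_lock(&my_state, MY_BIT_LOCK, TASK_UNINTERRUPTIBLE);
 *	}
 *
 *	static void my_bit_unlock(void)
 *	{
 *		clear_bit_unlock(MY_BIT_LOCK, &my_state);
 *		smp_mb__after_atomic();
 *		wake_up_bit(&my_state, MY_BIT_LOCK);
 *	}
 */
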
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        might_sleep();
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
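
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * for an atomic_t use count to drop to zero. The waker must call
 * wake_up_atomic_t() once the count reaches zero. The names my_users and
 * my_wait_action are hypothetical.
 *
 *	static atomic_t my_users = ATOMIC_INIT(0);
 *
 *	static int my_wait_action(atomic_t *val)
 *	{
 *		schedule();	// the simplest possible @action
 *		return 0;
 *	}
 *
 *	static void my_put_user(void)
 *	{
 *		if (atomic_dec_and_test(&my_users))
 *			wake_up_atomic_t(&my_users);
 *	}
 *
 *	static int my_wait_for_idle(void)
 *	{
 *		return wait_on_atomic_t(&my_users, my_wait_action,
 *					TASK_UNINTERRUPTIBLE);
 *	}
 */
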

#endif /* _LINUX_WAIT_H */