#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
        unsigned int            flags;
#define WQ_FLAG_EXCLUSIVE       0x01
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        task_list;
};

struct wait_bit_key {
        void                    *flags;
        int                     bit_nr;
#define WAIT_ATOMIC_T_BIT_NR    -1
        unsigned long           private;
};

struct wait_bit_queue {
        struct wait_bit_key     key;
        wait_queue_t            wait;
};

struct __wait_queue_head {
        spinlock_t              lock;
        struct list_head        task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
        .private        = tsk,                                          \
        .func           = default_wake_function,                        \
        .task_list      = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
        .task_list      = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
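
/*
 * Editorial usage sketch (not part of the original header; names like
 * my_waitq and struct my_dev are hypothetical): a file-scope wait queue
 * head can be declared statically, while one embedded in a structure is
 * initialised at runtime with init_waitqueue_head(), declared below.
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
 *
 *      struct my_dev {
 *              wait_queue_head_t waitq;
 *      };
 *      // during setup:  init_waitqueue_head(&dev->waitq);
 */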

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)                          \
        do {                                            \
                static struct lock_class_key __key;     \
                                                        \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags        = 0;
        q->private      = p;
        q->func         = default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
        q->flags        = 0;
        q->private      = NULL;
        q->func         = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}
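
/*
 * Editorial note (hypothetical names): waitqueue_active() is only an
 * unlocked emptiness test, typically used to skip a wake-up when nobody
 * is waiting.  The caller is responsible for ordering its condition
 * store against this check (for example via locking it already holds):
 *
 *      dev->data_ready = 1;
 *      if (waitqueue_active(&dev->waitq))
 *              wake_up(&dev->waitq);
 */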

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
        list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)                                              \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)                                       \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)                                \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)                           \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
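
/*
 * Editorial usage sketch (hypothetical names): the _poll variants pass the
 * poll event mask as the wake-up key, so poll/epoll waiters can filter on
 * it.  A driver that has just produced readable data might do:
 *
 *      wake_up_interruptible_poll(&dev->read_wait, POLLIN | POLLRDNORM);
 */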

#define ___wait_cond_timeout(condition)                                 \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})

#define ___wait_is_interruptible(state)                                 \
        (!__builtin_constant_p(state) ||                                \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)  \

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_t __wait;                                            \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        INIT_LIST_HEAD(&__wait.task_list);                              \
        if (exclusive)                                                  \
                __wait.flags = WQ_FLAG_EXCLUSIVE;                       \
        else                                                            \
                __wait.flags = 0;                                       \
                                                                        \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(&wq, &__wait, state);\
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        if (exclusive) {                                \
                                abort_exclusive_wait(&wq, &__wait,      \
                                                     state, NULL);      \
                                goto __out;                             \
                        }                                               \
                        break;                                          \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
__out:  __ret;                                                          \
})

#define __wait_event(wq, condition)                                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)                                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)
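
/*
 * Editorial usage sketch (hypothetical names): the classic pairing is a
 * waiter sleeping on a condition and a producer that updates the condition
 * and then calls wake_up():
 *
 *      // waiter
 *      wait_event(dev->waitq, dev->data_ready);
 *
 *      // producer
 *      dev->data_ready = 1;
 *      wake_up(&dev->waitq);
 */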

#define __wait_event_timeout(wq, condition, timeout)                    \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_UNINTERRUPTIBLE, 0, timeout,                 \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)                      \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_timeout(wq, condition, timeout);   \
        __ret;                                                          \
})
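
/*
 * Editorial usage sketch (hypothetical names): because the remaining jiffies
 * are returned, the caller can tell a timeout apart from the condition
 * becoming true:
 *
 *      long left = wait_event_timeout(dev->waitq, dev->done, HZ);
 *      if (!left)
 *              return -ETIMEDOUT;      // roughly one second elapsed
 *      // otherwise dev->done became true with 'left' jiffies to spare
 */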

#define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
} while (0)

#define __wait_event_interruptible(wq, condition)                       \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible(wq, condition);      \
        __ret;                                                          \
})
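
/*
 * Editorial usage sketch (hypothetical names): callers usually propagate the
 * -ERESTARTSYS result so the syscall can be restarted or converted to -EINTR:
 *
 *      ret = wait_event_interruptible(dev->waitq, dev->data_ready);
 *      if (ret)
 *              return ret;     // interrupted by a signal
 */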

#define __wait_event_interruptible_timeout(wq, condition, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)        \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_timeout(wq,          \
                                                condition, timeout);    \
        __ret;                                                          \
})
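
/*
 * Editorial usage sketch (hypothetical names): the three-way return value is
 * usually handled as signal / timeout / success:
 *
 *      long ret = wait_event_interruptible_timeout(dev->waitq, dev->done, HZ);
 *      if (ret < 0)
 *              return ret;             // -ERESTARTSYS: signal received
 *      if (ret == 0)
 *              return -ETIMEDOUT;      // timeout elapsed
 *      // condition became true, ret holds the remaining jiffies
 */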

#define __wait_event_hrtimeout(wq, condition, timeout, state)           \
({                                                                      \
        int __ret = 0;                                                  \
        struct hrtimer_sleeper __t;                                     \
                                                                        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
                              HRTIMER_MODE_REL);                        \
        hrtimer_init_sleeper(&__t, current);                            \
        if ((timeout).tv64 != KTIME_MAX)                                \
                hrtimer_start_range_ns(&__t.timer, timeout,             \
                                       current->timer_slack_ns,         \
                                       HRTIMER_MODE_REL);               \
                                                                        \
        __ret = ___wait_event(wq, condition, state, 0, 0,               \
                if (!__t.task) {                                        \
                        __ret = -ETIME;                                 \
                        break;                                          \
                }                                                       \
                schedule());                                            \
                                                                        \
        hrtimer_cancel(&__t.timer);                                     \
        destroy_hrtimer_on_stack(&__t.timer);                           \
        __ret;                                                          \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)                    \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_UNINTERRUPTIBLE);   \
        __ret;                                                          \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
({                                                                      \
        long __ret = 0;                                                 \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_INTERRUPTIBLE);     \
        __ret;                                                          \
})

#define __wait_event_interruptible_exclusive(wq, condition)             \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition)               \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_exclusive(wq, condition);\
        __ret;                                                          \
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({                                                                      \
        int __ret = 0;                                                  \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
                if (likely(list_empty(&__wait.task_list)))              \
                        __add_wait_queue_tail(&(wq), &__wait);          \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (signal_pending(current)) {                          \
                        __ret = -ERESTARTSYS;                           \
                        break;                                          \
                }                                                       \
                if (irq)                                                \
                        spin_unlock_irq(&(wq).lock);                    \
                else                                                    \
                        spin_unlock(&(wq).lock);                        \
                schedule();                                             \
                if (irq)                                                \
                        spin_lock_irq(&(wq).lock);                      \
                else                                                    \
                        spin_lock(&(wq).lock);                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
        __ret;                                                          \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
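
/*
 * Editorial usage sketch (hypothetical names): the _locked variants are for
 * callers that already serialise the condition with wq.lock itself:
 *
 *      spin_lock(&dev->waitq.lock);
 *      ret = wait_event_interruptible_locked(dev->waitq, dev->count > 0);
 *      if (!ret)
 *              dev->count--;
 *      spin_unlock(&dev->waitq.lock);
 */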


#define __wait_event_killable(wq, condition)                            \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_killable(wq, condition);           \
        __ret;                                                          \
})
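
/*
 * Editorial usage sketch (hypothetical names): a TASK_KILLABLE sleep ignores
 * most signals but still aborts on a fatal one, so the result is checked much
 * like the interruptible variant:
 *
 *      ret = wait_event_killable(dev->waitq, dev->done);
 *      if (ret)
 *              return ret;     // fatal signal pending
 */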


#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            spin_unlock_irq(&lock);                     \
                            cmd;                                        \
                            schedule();                                 \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
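
/*
 * Editorial usage sketch (hypothetical names): a caller that protects the
 * condition with an IRQ-disabling spinlock can keep the condition check under
 * the lock while still sleeping with it released:
 *
 *      spin_lock_irq(&dev->lock);
 *      wait_event_lock_irq(dev->waitq, !list_empty(&dev->queue), dev->lock);
 *      req = list_first_entry(&dev->queue, struct my_req, node);
 *      list_del(&req->node);
 *      spin_unlock_irq(&dev->lock);
 */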


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      spin_unlock_irq(&lock);                           \
                      cmd;                                              \
                      schedule();                                       \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock, cmd);  \
        __ret;                                                          \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock,);      \
        __ret;                                                          \
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
                                                    lock, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      spin_unlock_irq(&lock);                           \
                      __ret = schedule_timeout(__ret);                  \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
                                                  timeout)              \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_lock_irq_timeout(    \
                                        wq, condition, lock, timeout);  \
        __ret;                                                          \
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)                                \
        wait_queue_t name = {                                           \
                .private        = current,                              \
                .func           = function,                             \
                .task_list      = LIST_HEAD_INIT((name).task_list),     \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
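
/*
 * Editorial usage sketch (hypothetical names): DEFINE_WAIT() together with
 * prepare_to_wait()/finish_wait() is the open-coded form of the wait_event*()
 * macros, useful when the condition check needs extra work around it:
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&dev->waitq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (dev->done)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&dev->waitq, &wait);
 */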

#define DEFINE_WAIT_BIT(name, word, bit)                                \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
                .wait   = {                                             \
                        .private        = current,                      \
                        .func           = wake_bit_function,            \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

#define init_wait(wait)                                                 \
        do {                                                            \
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
                (wait)->flags = 0;                                      \
        } while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
                                       mode);
}
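
/*
 * Editorial usage sketch (hypothetical names and bit number): the bit-wait
 * API pairs with a clear_bit()/wake_up_bit() sequence on the same word and
 * bit, with a barrier between clearing and waking as per the usual
 * convention for the waking side:
 *
 *      // waiter
 *      if (wait_on_bit(&dev->flags, MY_BIT_BUSY, TASK_INTERRUPTIBLE))
 *              return -EINTR;  // signal received
 *
 *      // owner, when finished
 *      clear_bit(MY_BIT_BUSY, &dev->flags);
 *      smp_mb__after_atomic();
 *      wake_up_bit(&dev->flags, MY_BIT_BUSY);
 */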

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
                                       mode);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
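
/*
 * Editorial usage sketch (hypothetical names): the action callback usually
 * just schedules, and the release path calls wake_up_atomic_t() once the
 * counter reaches zero:
 *
 *      static int my_wait_action(atomic_t *p)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      wait_on_atomic_t(&dev->refs, my_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *      // elsewhere, on the release path:
 *      if (atomic_dec_and_test(&dev->refs))
 *              wake_up_atomic_t(&dev->refs);
 */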

#endif /* _LINUX_WAIT_H */