1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4 * Linux wait queue related types and methods
5 */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9
10 #include <asm/current.h>
11 #include <uapi/linux/wait.h>
12
13 typedef struct wait_queue_entry wait_queue_entry_t;
14
15 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
16 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17
18 /* wait_queue_entry::flags */
19 #define WQ_FLAG_EXCLUSIVE 0x01
20 #define WQ_FLAG_WOKEN 0x02
21 #define WQ_FLAG_BOOKMARK 0x04
22
23 /*
24 * A single wait-queue entry structure:
25 */
26 struct wait_queue_entry {
27 unsigned int flags;
28 void *private;
29 wait_queue_func_t func;
30 struct list_head entry;
31 };
32
33 struct wait_queue_head {
34 spinlock_t lock;
35 struct list_head head;
36 };
37 typedef struct wait_queue_head wait_queue_head_t;
38
39 struct task_struct;
40
41 /*
42  * Macros for declaration and initialisation of the datatypes
43 */
44
45 #define __WAITQUEUE_INITIALIZER(name, tsk) { \
46 .private = tsk, \
47 .func = default_wake_function, \
48 .entry = { NULL, NULL } }
49
50 #define DECLARE_WAITQUEUE(name, tsk) \
51 struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
52
53 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
54 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
55 .head = { &(name).head, &(name).head } }
56
57 #define DECLARE_WAIT_QUEUE_HEAD(name) \
58 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
59
60 extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
61
62 #define init_waitqueue_head(wq_head) \
63 do { \
64 static struct lock_class_key __key; \
65 \
66 __init_waitqueue_head((wq_head), #wq_head, &__key); \
67 } while (0)
68
69 #ifdef CONFIG_LOCKDEP
70 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
71 ({ init_waitqueue_head(&name); name; })
72 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
73 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
74 #else
75 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
76 #endif
77
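/*
 * Illustrative sketch (not part of this header): the two common ways to set
 * up a wait queue head. The names my_static_wq, my_device and setup_device
 * are hypothetical.
 *
 *	// file scope: statically declared and initialised
 *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *	// embedded in a dynamically allocated object
 *	struct my_device {
 *		wait_queue_head_t wq;
 *	};
 *
 *	static void setup_device(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *	}
 */
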
78 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
79 {
80 wq_entry->flags = 0;
81 wq_entry->private = p;
82 wq_entry->func = default_wake_function;
83 }
84
85 static inline void
86 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
87 {
88 wq_entry->flags = 0;
89 wq_entry->private = NULL;
90 wq_entry->func = func;
91 }
92
93 /**
94 * waitqueue_active -- locklessly test for waiters on the queue
95 * @wq_head: the waitqueue to test for waiters
96 *
97  * Returns true if the wait list is not empty
98 *
99 * NOTE: this function is lockless and requires care, incorrect usage _will_
100 * lead to sporadic and non-obvious failure.
101 *
102 * Use either while holding wait_queue_head::lock or when used for wakeups
103 * with an extra smp_mb() like:
104 *
105 * CPU0 - waker CPU1 - waiter
106 *
107 * for (;;) {
108 * @cond = true; prepare_to_wait(&wq_head, &wait, state);
109 * smp_mb(); // smp_mb() from set_current_state()
110 * if (waitqueue_active(wq_head)) if (@cond)
111 * wake_up(wq_head); break;
112 * schedule();
113 * }
114 * finish_wait(&wq_head, &wait);
115 *
116 * Because without the explicit smp_mb() it's possible for the
117 * waitqueue_active() load to get hoisted over the @cond store such that we'll
118 * observe an empty wait list while the waiter might not observe @cond.
119 *
120 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
121 * which (when the lock is uncontended) are of roughly equal cost.
122 */
123 static inline int waitqueue_active(struct wait_queue_head *wq_head)
124 {
125 return !list_empty(&wq_head->head);
126 }
127
128 /**
129 * wq_has_sleeper - check if there are any waiting processes
130 * @wq_head: wait queue head
131 *
132 * Returns true if wq_head has waiting processes
133 *
134 * Please refer to the comment for waitqueue_active.
135 */
136 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
137 {
138 /*
139 * We need to be sure we are in sync with the
140 * add_wait_queue modifications to the wait queue.
141 *
142 * This memory barrier should be paired with one on the
143 * waiting side.
144 */
145 smp_mb();
146 return waitqueue_active(wq_head);
147 }
148
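/*
 * Illustrative sketch (not part of this header): a waker that avoids taking
 * the wait-queue lock when nobody is sleeping, following the pattern in the
 * waitqueue_active() comment above. my_cond and my_wq are hypothetical.
 *
 *	// waker side
 *	WRITE_ONCE(my_cond, true);
 *	// wq_has_sleeper() supplies the smp_mb() needed to order the
 *	// my_cond store against the waiter-list load
 *	if (wq_has_sleeper(&my_wq))
 *		wake_up(&my_wq);
 *
 *	// waiter side
 *	wait_event(my_wq, READ_ONCE(my_cond));
 */
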
149 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
150 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
151 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
152
153 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
154 {
155 list_add(&wq_entry->entry, &wq_head->head);
156 }
157
158 /*
159 * Used for wake-one threads:
160 */
161 static inline void
162 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
163 {
164 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
165 __add_wait_queue(wq_head, wq_entry);
166 }
167
168 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
169 {
170 list_add_tail(&wq_entry->entry, &wq_head->head);
171 }
172
173 static inline void
174 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
175 {
176 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
177 __add_wait_queue_entry_tail(wq_head, wq_entry);
178 }
179
180 static inline void
181 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
182 {
183 list_del(&wq_entry->entry);
184 }
185
186 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
187 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
188 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
189 unsigned int mode, void *key, wait_queue_entry_t *bookmark);
190 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
191 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
192 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
193
194 #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
195 #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
196 #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
197 #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
198 #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
199
200 #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
201 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
202 #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
203 #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
204
205 /*
206 * Wakeup macros to be used to report events to the targets.
207 */
208 #define wake_up_poll(x, m) \
209 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
210 #define wake_up_locked_poll(x, m) \
211 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
212 #define wake_up_interruptible_poll(x, m) \
213 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
214 #define wake_up_interruptible_sync_poll(x, m) \
215 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
216
217 #define ___wait_cond_timeout(condition) \
218 ({ \
219 bool __cond = (condition); \
220 if (__cond && !__ret) \
221 __ret = 1; \
222 __cond || !__ret; \
223 })
224
225 #define ___wait_is_interruptible(state) \
226 (!__builtin_constant_p(state) || \
227 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
228
229 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
230
231 /*
232 * The below macro ___wait_event() has an explicit shadow of the __ret
233 * variable when used from the wait_event_*() macros.
234 *
235 * This is so that both can use the ___wait_cond_timeout() construct
236 * to wrap the condition.
237 *
238 * The type inconsistency of the wait_event_*() __ret variable is also
239 * on purpose; we use long where we can return timeout values and int
240 * otherwise.
241 */
242
243 #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
244 ({ \
245 __label__ __out; \
246 struct wait_queue_entry __wq_entry; \
247 long __ret = ret; /* explicit shadow */ \
248 \
249 init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
250 for (;;) { \
251 long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
252 \
253 if (condition) \
254 break; \
255 \
256 if (___wait_is_interruptible(state) && __int) { \
257 __ret = __int; \
258 goto __out; \
259 } \
260 \
261 cmd; \
262 } \
263 finish_wait(&wq_head, &__wq_entry); \
264 __out: __ret; \
265 })
266
267 #define __wait_event(wq_head, condition) \
268 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
269 schedule())
270
271 /**
272 * wait_event - sleep until a condition gets true
273 * @wq_head: the waitqueue to wait on
274 * @condition: a C expression for the event to wait for
275 *
276 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
277 * @condition evaluates to true. The @condition is checked each time
278 * the waitqueue @wq_head is woken up.
279 *
280 * wake_up() has to be called after changing any variable that could
281 * change the result of the wait condition.
282 */
283 #define wait_event(wq_head, condition) \
284 do { \
285 might_sleep(); \
286 if (condition) \
287 break; \
288 __wait_event(wq_head, condition); \
289 } while (0)
290
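/*
 * Illustrative sketch (not part of this header): a consumer sleeping in
 * wait_event() until a producer sets a flag and calls wake_up(). All names
 * (my_wq, data_ready, my_irq_handler) are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool data_ready;
 *
 *	// consumer (process context, not interruptible by signals)
 *	wait_event(my_wq, data_ready);
 *
 *	// producer (e.g. an interrupt handler)
 *	static irqreturn_t my_irq_handler(int irq, void *arg)
 *	{
 *		data_ready = true;
 *		wake_up(&my_wq);	// the waiter re-checks data_ready
 *		return IRQ_HANDLED;
 *	}
 */
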
291 #define __io_wait_event(wq_head, condition) \
292 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
293 io_schedule())
294
295 /*
296 * io_wait_event() -- like wait_event() but with io_schedule()
297 */
298 #define io_wait_event(wq_head, condition) \
299 do { \
300 might_sleep(); \
301 if (condition) \
302 break; \
303 __io_wait_event(wq_head, condition); \
304 } while (0)
305
306 #define __wait_event_freezable(wq_head, condition) \
307 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
308 schedule(); try_to_freeze())
309
310 /**
311 * wait_event_freezable - sleep (or freeze) until a condition gets true
312 * @wq_head: the waitqueue to wait on
313 * @condition: a C expression for the event to wait for
314 *
315 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
316 * to system load) until the @condition evaluates to true. The
317 * @condition is checked each time the waitqueue @wq_head is woken up.
318 *
319 * wake_up() has to be called after changing any variable that could
320 * change the result of the wait condition.
321 */
322 #define wait_event_freezable(wq_head, condition) \
323 ({ \
324 int __ret = 0; \
325 might_sleep(); \
326 if (!(condition)) \
327 __ret = __wait_event_freezable(wq_head, condition); \
328 __ret; \
329 })
330
331 #define __wait_event_timeout(wq_head, condition, timeout) \
332 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
333 TASK_UNINTERRUPTIBLE, 0, timeout, \
334 __ret = schedule_timeout(__ret))
335
336 /**
337 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
338 * @wq_head: the waitqueue to wait on
339 * @condition: a C expression for the event to wait for
340 * @timeout: timeout, in jiffies
341 *
342 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
343 * @condition evaluates to true. The @condition is checked each time
344 * the waitqueue @wq_head is woken up.
345 *
346 * wake_up() has to be called after changing any variable that could
347 * change the result of the wait condition.
348 *
349 * Returns:
350 * 0 if the @condition evaluated to %false after the @timeout elapsed,
351 * 1 if the @condition evaluated to %true after the @timeout elapsed,
352 * or the remaining jiffies (at least 1) if the @condition evaluated
353 * to %true before the @timeout elapsed.
354 */
355 #define wait_event_timeout(wq_head, condition, timeout) \
356 ({ \
357 long __ret = timeout; \
358 might_sleep(); \
359 if (!___wait_cond_timeout(condition)) \
360 __ret = __wait_event_timeout(wq_head, condition, timeout); \
361 __ret; \
362 })
363
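/*
 * Illustrative sketch (not part of this header): interpreting the
 * wait_event_timeout() return value. my_wq and hw_done are hypothetical.
 *
 *	long remaining = wait_event_timeout(my_wq, hw_done,
 *					    msecs_to_jiffies(100));
 *	if (!remaining)
 *		return -ETIMEDOUT;	// condition still false after 100ms
 *	// remaining >= 1: condition became true with 'remaining' jiffies left
 */
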
364 #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
365 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
366 TASK_INTERRUPTIBLE, 0, timeout, \
367 __ret = schedule_timeout(__ret); try_to_freeze())
368
369 /*
370 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
371 * increasing load and is freezable.
372 */
373 #define wait_event_freezable_timeout(wq_head, condition, timeout) \
374 ({ \
375 long __ret = timeout; \
376 might_sleep(); \
377 if (!___wait_cond_timeout(condition)) \
378 __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
379 __ret; \
380 })
381
382 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
383 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
384 cmd1; schedule(); cmd2)
385 /*
386 * Just like wait_event_cmd(), except it sets exclusive flag
387 */
388 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
389 do { \
390 if (condition) \
391 break; \
392 __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
393 } while (0)
394
395 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
396 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
397 cmd1; schedule(); cmd2)
398
399 /**
400 * wait_event_cmd - sleep until a condition gets true
401 * @wq_head: the waitqueue to wait on
402 * @condition: a C expression for the event to wait for
403 * @cmd1: the command to be executed before sleep
404 * @cmd2: the command to be executed after sleep
405 *
406 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
407 * @condition evaluates to true. The @condition is checked each time
408 * the waitqueue @wq_head is woken up.
409 *
410 * wake_up() has to be called after changing any variable that could
411 * change the result of the wait condition.
412 */
413 #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
414 do { \
415 if (condition) \
416 break; \
417 __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
418 } while (0)
419
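/*
 * Illustrative sketch (not part of this header): wait_event_cmd() dropping a
 * lock around the sleep; @cmd1 runs after the condition check and before
 * schedule(), @cmd2 runs after waking up. my_wq, my_mutex and slots_free are
 * hypothetical, and the condition is (re)checked with my_mutex held.
 *
 *	mutex_lock(&my_mutex);
 *	wait_event_cmd(my_wq, slots_free > 0,
 *		       mutex_unlock(&my_mutex),	// cmd1: before sleeping
 *		       mutex_lock(&my_mutex));	// cmd2: after waking
 *	// my_mutex is held here and slots_free > 0
 *	mutex_unlock(&my_mutex);
 */
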
420 #define __wait_event_interruptible(wq_head, condition) \
421 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
422 schedule())
423
424 /**
425 * wait_event_interruptible - sleep until a condition gets true
426 * @wq_head: the waitqueue to wait on
427 * @condition: a C expression for the event to wait for
428 *
429 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
430 * @condition evaluates to true or a signal is received.
431 * The @condition is checked each time the waitqueue @wq_head is woken up.
432 *
433 * wake_up() has to be called after changing any variable that could
434 * change the result of the wait condition.
435 *
436 * The function will return -ERESTARTSYS if it was interrupted by a
437 * signal and 0 if @condition evaluated to true.
438 */
439 #define wait_event_interruptible(wq_head, condition) \
440 ({ \
441 int __ret = 0; \
442 might_sleep(); \
443 if (!(condition)) \
444 __ret = __wait_event_interruptible(wq_head, condition); \
445 __ret; \
446 })
447
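/*
 * Illustrative sketch (not part of this header): propagating a signal from
 * wait_event_interruptible(), e.g. from a read() implementation. my_wq and
 * data_ready are hypothetical.
 *
 *	int err = wait_event_interruptible(my_wq, data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: let the signal be handled
 *	// data_ready is true here
 */
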
448 #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
449 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
450 TASK_INTERRUPTIBLE, 0, timeout, \
451 __ret = schedule_timeout(__ret))
452
453 /**
454 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
455 * @wq_head: the waitqueue to wait on
456 * @condition: a C expression for the event to wait for
457 * @timeout: timeout, in jiffies
458 *
459 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
460 * @condition evaluates to true or a signal is received.
461 * The @condition is checked each time the waitqueue @wq_head is woken up.
462 *
463 * wake_up() has to be called after changing any variable that could
464 * change the result of the wait condition.
465 *
466 * Returns:
467 * 0 if the @condition evaluated to %false after the @timeout elapsed,
468 * 1 if the @condition evaluated to %true after the @timeout elapsed,
469 * the remaining jiffies (at least 1) if the @condition evaluated
470 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
471 * interrupted by a signal.
472 */
473 #define wait_event_interruptible_timeout(wq_head, condition, timeout) \
474 ({ \
475 long __ret = timeout; \
476 might_sleep(); \
477 if (!___wait_cond_timeout(condition)) \
478 __ret = __wait_event_interruptible_timeout(wq_head, \
479 condition, timeout); \
480 __ret; \
481 })
482
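/*
 * Illustrative sketch (not part of this header): the three possible outcomes
 * of wait_event_interruptible_timeout(). my_wq and done are hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, done, HZ);
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS: interrupted by a signal
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// one second elapsed, condition false
 *	// ret > 0: condition true, 'ret' jiffies of the timeout were left
 */
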
483 #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
484 ({ \
485 int __ret = 0; \
486 struct hrtimer_sleeper __t; \
487 \
488 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
489 hrtimer_init_sleeper(&__t, current); \
490 if ((timeout) != KTIME_MAX) \
491 hrtimer_start_range_ns(&__t.timer, timeout, \
492 current->timer_slack_ns, \
493 HRTIMER_MODE_REL); \
494 \
495 __ret = ___wait_event(wq_head, condition, state, 0, 0, \
496 if (!__t.task) { \
497 __ret = -ETIME; \
498 break; \
499 } \
500 schedule()); \
501 \
502 hrtimer_cancel(&__t.timer); \
503 destroy_hrtimer_on_stack(&__t.timer); \
504 __ret; \
505 })
506
507 /**
508 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
509 * @wq_head: the waitqueue to wait on
510 * @condition: a C expression for the event to wait for
511 * @timeout: timeout, as a ktime_t
512 *
513 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
514 * @condition evaluates to true or the timeout elapses.
515 * The @condition is checked each time the waitqueue @wq_head is woken up.
516 *
517 * wake_up() has to be called after changing any variable that could
518 * change the result of the wait condition.
519 *
520 * The function returns 0 if @condition became true, or -ETIME if the timeout
521 * elapsed.
522 */
523 #define wait_event_hrtimeout(wq_head, condition, timeout) \
524 ({ \
525 int __ret = 0; \
526 might_sleep(); \
527 if (!(condition)) \
528 __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
529 TASK_UNINTERRUPTIBLE); \
530 __ret; \
531 })
532
533 /**
534 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
535 * @wq: the waitqueue to wait on
536 * @condition: a C expression for the event to wait for
537 * @timeout: timeout, as a ktime_t
538 *
539 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
540 * @condition evaluates to true or a signal is received.
541 * The @condition is checked each time the waitqueue @wq is woken up.
542 *
543 * wake_up() has to be called after changing any variable that could
544 * change the result of the wait condition.
545 *
546 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
547 * interrupted by a signal, or -ETIME if the timeout elapsed.
548 */
549 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
550 ({ \
551 long __ret = 0; \
552 might_sleep(); \
553 if (!(condition)) \
554 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
555 TASK_INTERRUPTIBLE); \
556 __ret; \
557 })
558
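/*
 * Illustrative sketch (not part of this header): a high-resolution,
 * signal-aware wait bounded to roughly 500us. my_wq and fifo_empty are
 * hypothetical.
 *
 *	int err = wait_event_interruptible_hrtimeout(my_wq, !fifo_empty,
 *				ktime_set(0, 500 * NSEC_PER_USEC));
 *	if (err == -ETIME)
 *		return -ETIMEDOUT;	// timed out, FIFO still empty
 *	if (err)
 *		return err;		// -ERESTARTSYS
 */
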
559 #define __wait_event_interruptible_exclusive(wq, condition) \
560 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
561 schedule())
562
563 #define wait_event_interruptible_exclusive(wq, condition) \
564 ({ \
565 int __ret = 0; \
566 might_sleep(); \
567 if (!(condition)) \
568 __ret = __wait_event_interruptible_exclusive(wq, condition); \
569 __ret; \
570 })
571
572 #define __wait_event_killable_exclusive(wq, condition) \
573 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
574 schedule())
575
576 #define wait_event_killable_exclusive(wq, condition) \
577 ({ \
578 int __ret = 0; \
579 might_sleep(); \
580 if (!(condition)) \
581 __ret = __wait_event_killable_exclusive(wq, condition); \
582 __ret; \
583 })
584
585
586 #define __wait_event_freezable_exclusive(wq, condition) \
587 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
588 schedule(); try_to_freeze())
589
590 #define wait_event_freezable_exclusive(wq, condition) \
591 ({ \
592 int __ret = 0; \
593 might_sleep(); \
594 if (!(condition)) \
595 __ret = __wait_event_freezable_exclusive(wq, condition); \
596 __ret; \
597 })
598
599 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
600 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
601
602 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
603 ({ \
604 int __ret; \
605 DEFINE_WAIT(__wait); \
606 if (exclusive) \
607 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
608 do { \
609 __ret = fn(&(wq), &__wait); \
610 if (__ret) \
611 break; \
612 } while (!(condition)); \
613 __remove_wait_queue(&(wq), &__wait); \
614 __set_current_state(TASK_RUNNING); \
615 __ret; \
616 })
617
618
619 /**
620 * wait_event_interruptible_locked - sleep until a condition gets true
621 * @wq: the waitqueue to wait on
622 * @condition: a C expression for the event to wait for
623 *
624 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
625 * @condition evaluates to true or a signal is received.
626 * The @condition is checked each time the waitqueue @wq is woken up.
627 *
628 * It must be called with wq.lock held. The spinlock is dropped while
629 * sleeping, but @condition is tested with the lock held, and the lock
630 * is held again when this macro exits.
631 *
632 * The lock is locked/unlocked using spin_lock()/spin_unlock()
633 * functions which must match the way they are locked/unlocked outside
634 * of this macro.
635 *
636 * wake_up_locked() has to be called after changing any variable that could
637 * change the result of the wait condition.
638 *
639 * The function will return -ERESTARTSYS if it was interrupted by a
640 * signal and 0 if @condition evaluated to true.
641 */
642 #define wait_event_interruptible_locked(wq, condition) \
643 ((condition) \
644 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
645
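/*
 * Illustrative sketch (not part of this header): the locked variant is called
 * with wq.lock already held, and the waker uses wake_up_locked() under the
 * same lock. my_wq and my_count are hypothetical.
 *
 *	int err;
 *
 *	// waiter
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, my_count > 0);
 *	if (!err)
 *		my_count--;
 *	spin_unlock(&my_wq.lock);
 *
 *	// waker
 *	spin_lock(&my_wq.lock);
 *	my_count++;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */
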
646 /**
647 * wait_event_interruptible_locked_irq - sleep until a condition gets true
648 * @wq: the waitqueue to wait on
649 * @condition: a C expression for the event to wait for
650 *
651 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
652 * @condition evaluates to true or a signal is received.
653 * The @condition is checked each time the waitqueue @wq is woken up.
654 *
655 * It must be called with wq.lock held. The spinlock is dropped while
656 * sleeping, but @condition is tested with the lock held, and the lock
657 * is held again when this macro exits.
658 *
659 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
660 * functions which must match the way they are locked/unlocked outside
661 * of this macro.
662 *
663 * wake_up_locked() has to be called after changing any variable that could
664 * change the result of the wait condition.
665 *
666 * The function will return -ERESTARTSYS if it was interrupted by a
667 * signal and 0 if @condition evaluated to true.
668 */
669 #define wait_event_interruptible_locked_irq(wq, condition) \
670 ((condition) \
671 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
672
673 /**
674 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
675 * @wq: the waitqueue to wait on
676 * @condition: a C expression for the event to wait for
677 *
678 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
679 * @condition evaluates to true or a signal is received.
680 * The @condition is checked each time the waitqueue @wq is woken up.
681 *
682 * It must be called with wq.lock held. The spinlock is dropped while
683 * sleeping, but @condition is tested with the lock held, and the lock
684 * is held again when this macro exits.
685 *
686 * The lock is locked/unlocked using spin_lock()/spin_unlock()
687 * functions which must match the way they are locked/unlocked outside
688 * of this macro.
689 *
690 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
691 * set, so when this process is woken up, further exclusive waiters on
692 * the list are not considered for that wakeup.
693 *
694 * wake_up_locked() has to be called after changing any variable that could
695 * change the result of the wait condition.
696 *
697 * The function will return -ERESTARTSYS if it was interrupted by a
698 * signal and 0 if @condition evaluated to true.
699 */
700 #define wait_event_interruptible_exclusive_locked(wq, condition) \
701 ((condition) \
702 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
703
704 /**
705 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
706 * @wq: the waitqueue to wait on
707 * @condition: a C expression for the event to wait for
708 *
709 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
710 * @condition evaluates to true or a signal is received.
711 * The @condition is checked each time the waitqueue @wq is woken up.
712 *
713 * It must be called with wq.lock held. The spinlock is dropped while
714 * sleeping, but @condition is tested with the lock held, and the lock
715 * is held again when this macro exits.
716 *
717 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
718 * functions which must match the way they are locked/unlocked outside
719 * of this macro.
720 *
721 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
722 * set, so when this process is woken up, further exclusive waiters on
723 * the list are not considered for that wakeup.
724 *
725 * wake_up_locked() has to be called after changing any variable that could
726 * change the result of the wait condition.
727 *
728 * The function will return -ERESTARTSYS if it was interrupted by a
729 * signal and 0 if @condition evaluated to true.
730 */
731 #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
732 ((condition) \
733 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
734
735
736 #define __wait_event_killable(wq, condition) \
737 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
738
739 /**
740 * wait_event_killable - sleep until a condition gets true
741 * @wq_head: the waitqueue to wait on
742 * @condition: a C expression for the event to wait for
743 *
744 * The process is put to sleep (TASK_KILLABLE) until the
745 * @condition evaluates to true or a fatal signal is received.
746 * The @condition is checked each time the waitqueue @wq_head is woken up.
747 *
748 * wake_up() has to be called after changing any variable that could
749 * change the result of the wait condition.
750 *
751 * The function will return -ERESTARTSYS if it was interrupted by a
752 * signal and 0 if @condition evaluated to true.
753 */
754 #define wait_event_killable(wq_head, condition) \
755 ({ \
756 int __ret = 0; \
757 might_sleep(); \
758 if (!(condition)) \
759 __ret = __wait_event_killable(wq_head, condition); \
760 __ret; \
761 })
762
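/*
 * Illustrative sketch (not part of this header): wait_event_killable() for a
 * potentially long wait that should not be restartable by ordinary signals
 * but must not leave the task unkillable. my_wq and io_done are hypothetical.
 *
 *	int err = wait_event_killable(my_wq, io_done);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a fatal signal is pending
 */
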
763 #define __wait_event_killable_timeout(wq_head, condition, timeout) \
764 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
765 TASK_KILLABLE, 0, timeout, \
766 __ret = schedule_timeout(__ret))
767
768 /**
769 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
770 * @wq_head: the waitqueue to wait on
771 * @condition: a C expression for the event to wait for
772 * @timeout: timeout, in jiffies
773 *
774 * The process is put to sleep (TASK_KILLABLE) until the
775 * @condition evaluates to true or a kill signal is received.
776 * The @condition is checked each time the waitqueue @wq_head is woken up.
777 *
778 * wake_up() has to be called after changing any variable that could
779 * change the result of the wait condition.
780 *
781 * Returns:
782 * 0 if the @condition evaluated to %false after the @timeout elapsed,
783 * 1 if the @condition evaluated to %true after the @timeout elapsed,
784 * the remaining jiffies (at least 1) if the @condition evaluated
785 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
786 * interrupted by a kill signal.
787 *
788 * Only kill signals interrupt this process.
789 */
790 #define wait_event_killable_timeout(wq_head, condition, timeout) \
791 ({ \
792 long __ret = timeout; \
793 might_sleep(); \
794 if (!___wait_cond_timeout(condition)) \
795 __ret = __wait_event_killable_timeout(wq_head, \
796 condition, timeout); \
797 __ret; \
798 })
799
800
801 #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
802 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
803 spin_unlock_irq(&lock); \
804 cmd; \
805 schedule(); \
806 spin_lock_irq(&lock))
807
808 /**
809 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
810 * condition is checked under the lock. This
811 * is expected to be called with the lock
812 * taken.
813 * @wq_head: the waitqueue to wait on
814 * @condition: a C expression for the event to wait for
815 * @lock: a locked spinlock_t, which will be released before cmd
816 * and schedule() and reacquired afterwards.
817 * @cmd: a command which is invoked outside the critical section before
818 * sleep
819 *
820 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
821 * @condition evaluates to true. The @condition is checked each time
822 * the waitqueue @wq_head is woken up.
823 *
824 * wake_up() has to be called after changing any variable that could
825 * change the result of the wait condition.
826 *
827 * This is supposed to be called while holding the lock. The lock is
828 * dropped before invoking the cmd and going to sleep and is reacquired
829 * afterwards.
830 */
831 #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
832 do { \
833 if (condition) \
834 break; \
835 __wait_event_lock_irq(wq_head, condition, lock, cmd); \
836 } while (0)
837
838 /**
839 * wait_event_lock_irq - sleep until a condition gets true. The
840 * condition is checked under the lock. This
841 * is expected to be called with the lock
842 * taken.
843 * @wq_head: the waitqueue to wait on
844 * @condition: a C expression for the event to wait for
845 * @lock: a locked spinlock_t, which will be released before schedule()
846 * and reacquired afterwards.
847 *
848 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
849 * @condition evaluates to true. The @condition is checked each time
850 * the waitqueue @wq_head is woken up.
851 *
852 * wake_up() has to be called after changing any variable that could
853 * change the result of the wait condition.
854 *
855 * This is supposed to be called while holding the lock. The lock is
856 * dropped before going to sleep and is reacquired afterwards.
857 */
858 #define wait_event_lock_irq(wq_head, condition, lock) \
859 do { \
860 if (condition) \
861 break; \
862 __wait_event_lock_irq(wq_head, condition, lock, ); \
863 } while (0)
864
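/*
 * Illustrative sketch (not part of this header): waiting for a list protected
 * by an IRQ-safe spinlock to become non-empty; the macro drops my_lock around
 * schedule() and re-takes it before re-testing the condition. my_lock,
 * my_list and my_wq are hypothetical.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	// my_lock is held again here and the list is non-empty
 *	spin_unlock_irq(&my_lock);
 */
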
865
866 #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
867 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
868 spin_unlock_irq(&lock); \
869 cmd; \
870 schedule(); \
871 spin_lock_irq(&lock))
872
873 /**
874 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
875 * The condition is checked under the lock. This is expected to
876 * be called with the lock taken.
877 * @wq_head: the waitqueue to wait on
878 * @condition: a C expression for the event to wait for
879 * @lock: a locked spinlock_t, which will be released before cmd and
880 * schedule() and reacquired afterwards.
881 * @cmd: a command which is invoked outside the critical section before
882 * sleep
883 *
884 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
885 * @condition evaluates to true or a signal is received. The @condition is
886 * checked each time the waitqueue @wq_head is woken up.
887 *
888 * wake_up() has to be called after changing any variable that could
889 * change the result of the wait condition.
890 *
891 * This is supposed to be called while holding the lock. The lock is
892 * dropped before invoking the cmd and going to sleep and is reacquired
893 * afterwards.
894 *
895 * The macro will return -ERESTARTSYS if it was interrupted by a signal
896 * and 0 if @condition evaluated to true.
897 */
898 #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
899 ({ \
900 int __ret = 0; \
901 if (!(condition)) \
902 __ret = __wait_event_interruptible_lock_irq(wq_head, \
903 condition, lock, cmd); \
904 __ret; \
905 })
906
907 /**
908 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
909 * The condition is checked under the lock. This is expected
910 * to be called with the lock taken.
911 * @wq_head: the waitqueue to wait on
912 * @condition: a C expression for the event to wait for
913 * @lock: a locked spinlock_t, which will be released before schedule()
914 * and reacquired afterwards.
915 *
916 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
917 * @condition evaluates to true or a signal is received. The @condition is
918 * checked each time the waitqueue @wq_head is woken up.
919 *
920 * wake_up() has to be called after changing any variable that could
921 * change the result of the wait condition.
922 *
923 * This is supposed to be called while holding the lock. The lock is
924 * dropped before going to sleep and is reacquired afterwards.
925 *
926 * The macro will return -ERESTARTSYS if it was interrupted by a signal
927 * and 0 if @condition evaluated to true.
928 */
929 #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
930 ({ \
931 int __ret = 0; \
932 if (!(condition)) \
933 __ret = __wait_event_interruptible_lock_irq(wq_head, \
934 condition, lock,); \
935 __ret; \
936 })
937
938 #define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
939 lock, timeout) \
940 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
941 TASK_INTERRUPTIBLE, 0, timeout, \
942 spin_unlock_irq(&lock); \
943 __ret = schedule_timeout(__ret); \
944 spin_lock_irq(&lock));
945
946 /**
947 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
948 * true or a timeout elapses. The condition is checked under
949 * the lock. This is expected to be called with the lock taken.
950 * @wq_head: the waitqueue to wait on
951 * @condition: a C expression for the event to wait for
952 * @lock: a locked spinlock_t, which will be released before schedule()
953 * and reacquired afterwards.
954 * @timeout: timeout, in jiffies
955 *
956 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
957 * @condition evaluates to true or a signal is received. The @condition is
958 * checked each time the waitqueue @wq_head is woken up.
959 *
960 * wake_up() has to be called after changing any variable that could
961 * change the result of the wait condition.
962 *
963 * This is supposed to be called while holding the lock. The lock is
964 * dropped before going to sleep and is reacquired afterwards.
965 *
966 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it was
967 * interrupted by a signal, or the remaining jiffies (at least 1) if the
968 * @condition evaluated to true before the @timeout elapsed.
969 */
970 #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
971 timeout) \
972 ({ \
973 long __ret = timeout; \
974 if (!___wait_cond_timeout(condition)) \
975 __ret = __wait_event_interruptible_lock_irq_timeout( \
976 wq_head, condition, lock, timeout); \
977 __ret; \
978 })
979
980 /*
981 * Waitqueues which are removed from the waitqueue_head at wakeup time
982 */
983 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
984 void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
985 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
986 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
987 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
988 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
989 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
990
991 #define DEFINE_WAIT_FUNC(name, function) \
992 struct wait_queue_entry name = { \
993 .private = current, \
994 .func = function, \
995 .entry = LIST_HEAD_INIT((name).entry), \
996 }
997
998 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
999
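/*
 * Illustrative sketch (not part of this header): an open-coded wait loop for
 * cases the wait_event*() macros cannot express, using DEFINE_WAIT() with the
 * default autoremove_wake_function. my_wq and resource_free are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (resource_free)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
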
1000 #define init_wait(wait) \
1001 do { \
1002 (wait)->private = current; \
1003 (wait)->func = autoremove_wake_function; \
1004 INIT_LIST_HEAD(&(wait)->entry); \
1005 (wait)->flags = 0; \
1006 } while (0)
1007
1008 #endif /* _LINUX_WAIT_H */