#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>

/*
 * Simple wait queues
 *
 * While these are very similar to the other/complex wait queues (wait.h), the
 * most important difference is that the simple waitqueue allows for
 * deterministic behaviour -- in other words, it has strictly bounded IRQ and
 * lock hold times.
 *
 * In order to make this so, we had to drop a fair number of features of the
 * other waitqueue code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the exclusive mode, because this requires preserving the list order,
 *    and that is hard.
 *
 *  - custom wake functions, because no guarantees can be made about
 *    arbitrary callback code.
 *
 * As a side effect, the data structures are slimmer.
 *
 * Use this wait queue where possible.
 */
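
/*
 * A minimal usage sketch (illustrative only; the queue, the flag and the
 * waiter/waker split below are hypothetical and not part of this header):
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool my_flag;
 *
 * waiter:
 *	swait_event_interruptible(my_wq, my_flag);
 *
 * waker:
 *	my_flag = true;
 *	swake_up(&my_wq);
 *
 * As with the regular wait queues, the store that makes the condition true
 * must happen before the wakeup; depending on the caller, additional locking
 * or memory barriers around the condition may be required.
 */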

struct task_struct;

struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};

struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};

#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)

#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;	\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)

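/*
 * Queue heads embedded in dynamically allocated objects must be initialized
 * at runtime rather than with the DECLARE_* macros above. A hypothetical
 * sketch (the structure and field names are illustrative only):
 *
 *	struct my_dev {
 *		struct swait_queue_head wq;
 *	};
 *
 *	init_swait_queue_head(&dev->wq);
 */
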
#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif

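/*
 * Check whether any waiters are queued on @q. Like waitqueue_active(), this
 * is an unlocked peek at the list; callers that act on the result need their
 * own serialization against concurrent waiters and wakers.
 */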
static inline int swait_active(struct swait_queue_head *q)
{
	return !list_empty(&q->task_list);
}

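/*
 * Wakeup side: swake_up() wakes a single waiter, swake_up_all() wakes every
 * waiter currently queued, and swake_up_locked() is the variant for callers
 * that already hold q->lock.
 */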
extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

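/*
 * The prepare/finish pair backs the swait_event*() macros below and can be
 * open-coded when a custom wait loop is needed. A sketch of the usual
 * pattern (the queue and condition names are placeholders):
 *
 *	DECLARE_SWAITQUEUE(wait);
 *
 *	for (;;) {
 *		prepare_to_swait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	finish_swait(&my_wq, &wait);
 */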
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);

/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
	__ret;								\
})

#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			    schedule())

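/*
 * swait_event - sleep (TASK_UNINTERRUPTIBLE) until @condition becomes true.
 * @condition is re-evaluated each time @wq is woken up; swake_up() must be
 * called after changing any variable that could affect its result.
 */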
#define swait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)

#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_UNINTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

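/*
 * swait_event_timeout - like swait_event(), but give up after @timeout
 * jiffies. Returns 0 if the timeout elapsed with @condition still false,
 * otherwise the remaining jiffies (at least 1) once @condition became true.
 */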
#define swait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		       schedule())

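/*
 * swait_event_interruptible - sleep (TASK_INTERRUPTIBLE) until @condition
 * becomes true or a signal is pending. Returns 0 if @condition became true,
 * or -ERESTARTSYS if the wait was interrupted by a signal.
 */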
#define swait_event_interruptible(wq, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})

#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_INTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

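/*
 * swait_event_interruptible_timeout - interruptible wait with a timeout.
 * Returns 0 if the timeout elapsed with @condition still false, the
 * remaining jiffies (at least 1) if @condition became true, or -ERESTARTSYS
 * if a signal interrupted the wait.
 */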
#define swait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#endif /* _LINUX_SWAIT_H */