/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>

/*
 * Simple wait queues
 *
 * While these are very similar to the other/complex wait queues (wait.h) the
 * most important difference is that the simple waitqueue allows for
 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
 * times.
 *
 * In order to make this so, we had to drop a fair number of features of the
 * other waitqueue code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the exclusive mode; because this requires preserving the list order
 *    and this is hard.
 *
 *  - custom wake functions; because you cannot give any guarantees about
 *    random code.
 *
 * As a side effect of this; the data structures are slimmer.
 *
 * One would recommend using this wait queue where possible.
 */
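
/*
 * A minimal usage sketch (illustrative only; @my_wq and @done are made-up
 * names, not part of this API), built from the declarations below:
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	// waiter
 *	swait_event(my_wq, done);
 *
 *	// waker
 *	done = true;
 *	swake_up(&my_wq);
 */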
35 | ||
36 | struct task_struct; | |
37 | ||
38 | struct swait_queue_head { | |
39 | raw_spinlock_t lock; | |
40 | struct list_head task_list; | |
41 | }; | |
42 | ||
43 | struct swait_queue { | |
44 | struct task_struct *task; | |
45 | struct list_head task_list; | |
46 | }; | |
47 | ||
48 | #define __SWAITQUEUE_INITIALIZER(name) { \ | |
49 | .task = current, \ | |
50 | .task_list = LIST_HEAD_INIT((name).task_list), \ | |
51 | } | |
52 | ||
53 | #define DECLARE_SWAITQUEUE(name) \ | |
54 | struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) | |
55 | ||
56 | #define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ | |
57 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ | |
58 | .task_list = LIST_HEAD_INIT((name).task_list), \ | |
59 | } | |
60 | ||
61 | #define DECLARE_SWAIT_QUEUE_HEAD(name) \ | |
62 | struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) | |
63 | ||
64 | extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, | |
65 | struct lock_class_key *key); | |
66 | ||
67 | #define init_swait_queue_head(q) \ | |
68 | do { \ | |
69 | static struct lock_class_key __key; \ | |
70 | __init_swait_queue_head((q), #q, &__key); \ | |
71 | } while (0) | |
72 | ||
73 | #ifdef CONFIG_LOCKDEP | |
74 | # define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ | |
75 | ({ init_swait_queue_head(&name); name; }) | |
76 | # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ | |
77 | struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) | |
78 | #else | |
79 | # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ | |
80 | DECLARE_SWAIT_QUEUE_HEAD(name) | |
81 | #endif | |
82 | ||
/**
 * swait_active -- locklessly test for waiters on the queue
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * NOTE2: this function has the same above implications as regular waitqueues.
 *
 * Use either while holding swait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (swait_active(wq_head))        if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_swait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * swait_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 * This, in turn, can trigger missing wakeups.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int swait_active(struct swait_queue_head *wq)
{
	return !list_empty(&wq->task_list);
}

/**
 * swq_has_sleeper - check if there are any waiting processes
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if @wq has waiting processes
 *
 * Please refer to the comment for swait_active.
 */
static inline bool swq_has_sleeper(struct swait_queue_head *wq)
{
	/*
	 * We need to be sure we are in sync with the list_add()
	 * modifications to the wait queue (task_list).
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return swait_active(wq);
}
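
/*
 * Illustrative waker-side sketch of the barrier pattern documented above
 * (@cond is a made-up flag, not part of this API): swq_has_sleeper()
 * already supplies the smp_mb() between the @cond store and the waiter
 * test, so no explicit barrier is needed here.
 *
 *	cond = true;
 *	if (swq_has_sleeper(&wq))
 *		swake_up(&wq);
 */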
141 | ||
142 | extern void swake_up(struct swait_queue_head *q); | |
143 | extern void swake_up_all(struct swait_queue_head *q); | |
144 | extern void swake_up_locked(struct swait_queue_head *q); | |
145 | ||
146 | extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); | |
147 | extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); | |
148 | extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); | |
149 | ||
150 | extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); | |
151 | extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); | |
152 | ||
/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
	__ret;								\
})
176 | ||
177 | #define __swait_event(wq, condition) \ | |
178 | (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ | |
179 | schedule()) | |
180 | ||
181 | #define swait_event(wq, condition) \ | |
182 | do { \ | |
183 | if (condition) \ | |
184 | break; \ | |
185 | __swait_event(wq, condition); \ | |
186 | } while (0) | |
187 | ||
188 | #define __swait_event_timeout(wq, condition, timeout) \ | |
189 | ___swait_event(wq, ___wait_cond_timeout(condition), \ | |
190 | TASK_UNINTERRUPTIBLE, timeout, \ | |
191 | __ret = schedule_timeout(__ret)) | |
192 | ||
193 | #define swait_event_timeout(wq, condition, timeout) \ | |
194 | ({ \ | |
195 | long __ret = timeout; \ | |
196 | if (!___wait_cond_timeout(condition)) \ | |
197 | __ret = __swait_event_timeout(wq, condition, timeout); \ | |
198 | __ret; \ | |
199 | }) | |
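
/*
 * Illustrative use of swait_event_timeout() (@my_wq and @done are made-up
 * names): a zero return means @done never became true within the timeout.
 *
 *	if (!swait_event_timeout(my_wq, done, HZ))
 *		pr_warn("timed out waiting for done\n");
 */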
200 | ||
201 | #define __swait_event_interruptible(wq, condition) \ | |
202 | ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ | |
203 | schedule()) | |
204 | ||
205 | #define swait_event_interruptible(wq, condition) \ | |
206 | ({ \ | |
207 | int __ret = 0; \ | |
208 | if (!(condition)) \ | |
209 | __ret = __swait_event_interruptible(wq, condition); \ | |
210 | __ret; \ | |
211 | }) | |
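
/*
 * Illustrative use of swait_event_interruptible() (made-up names): a
 * non-zero return (-ERESTARTSYS) means a signal arrived before @done
 * became true.
 *
 *	ret = swait_event_interruptible(my_wq, done);
 *	if (ret)
 *		return ret;
 */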
212 | ||
213 | #define __swait_event_interruptible_timeout(wq, condition, timeout) \ | |
214 | ___swait_event(wq, ___wait_cond_timeout(condition), \ | |
215 | TASK_INTERRUPTIBLE, timeout, \ | |
216 | __ret = schedule_timeout(__ret)) | |
217 | ||
218 | #define swait_event_interruptible_timeout(wq, condition, timeout) \ | |
219 | ({ \ | |
220 | long __ret = timeout; \ | |
221 | if (!___wait_cond_timeout(condition)) \ | |
222 | __ret = __swait_event_interruptible_timeout(wq, \ | |
223 | condition, timeout); \ | |
224 | __ret; \ | |
225 | }) | |
226 | ||
352eee12 LR |
227 | #define __swait_event_idle(wq, condition) \ |
228 | (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) | |
229 | ||
230 | /** | |
231 | * swait_event_idle - wait without system load contribution | |
232 | * @wq: the waitqueue to wait on | |
233 | * @condition: a C expression for the event to wait for | |
234 | * | |
235 | * The process is put to sleep (TASK_IDLE) until the @condition evaluates to | |
236 | * true. The @condition is checked each time the waitqueue @wq is woken up. | |
237 | * | |
238 | * This function is mostly used when a kthread or workqueue waits for some | |
239 | * condition and doesn't want to contribute to system load. Signals are | |
240 | * ignored. | |
241 | */ | |
#define swait_event_idle(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event_idle(wq, condition);				\
} while (0)
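
/*
 * Illustrative use (made-up names): a kthread parking itself until work
 * arrives, without inflating the load average:
 *
 *	swait_event_idle(my_wq, kthread_should_stop() || have_work);
 */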
248 | ||
249 | #define __swait_event_idle_timeout(wq, condition, timeout) \ | |
250 | ___swait_event(wq, ___wait_cond_timeout(condition), \ | |
251 | TASK_IDLE, timeout, \ | |
252 | __ret = schedule_timeout(__ret)) | |
253 | ||
254 | /** | |
255 | * swait_event_idle_timeout - wait up to timeout without load contribution | |
256 | * @wq: the waitqueue to wait on | |
257 | * @condition: a C expression for the event to wait for | |
258 | * @timeout: timeout at which we'll give up in jiffies | |
259 | * | |
260 | * The process is put to sleep (TASK_IDLE) until the @condition evaluates to | |
261 | * true. The @condition is checked each time the waitqueue @wq is woken up. | |
262 | * | |
263 | * This function is mostly used when a kthread or workqueue waits for some | |
264 | * condition and doesn't want to contribute to system load. Signals are | |
265 | * ignored. | |
266 | * | |
267 | * Returns: | |
268 | * 0 if the @condition evaluated to %false after the @timeout elapsed, | |
269 | * 1 if the @condition evaluated to %true after the @timeout elapsed, | |
270 | * or the remaining jiffies (at least 1) if the @condition evaluated | |
271 | * to %true before the @timeout elapsed. | |
272 | */ | |
#define swait_event_idle_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_idle_timeout(wq,			\
						   condition, timeout);	\
	__ret;								\
})
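
/*
 * Illustrative return-value handling for swait_event_idle_timeout()
 * (made-up names):
 *
 *	long left = swait_event_idle_timeout(my_wq, done, 2 * HZ);
 *	if (!left)
 *		pr_warn("timed out waiting for done\n");
 */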
281 | ||
13b35686 | 282 | #endif /* _LINUX_SWAIT_H */ |