/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

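/*
 * Illustrative usage (a sketch, not code from this file): callers normally
 * reach __init_waitqueue_head() through the init_waitqueue_head() wrapper
 * macro in <linux/wait.h>, which supplies a lock class key per call site,
 * or use DECLARE_WAIT_QUEUE_HEAD() for static heads. 'my_wq' and 'dyn_wq'
 * are hypothetical names:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	// static head
 *
 *	struct wait_queue_head dyn_wq;		// dynamically initialized head
 *	init_waitqueue_head(&dyn_wq);
 */
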
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

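/*
 * Note the asymmetry above: add_wait_queue() clears WQ_FLAG_EXCLUSIVE and
 * queues at the head of the list, while add_wait_queue_exclusive() sets the
 * flag and queues at the tail. Keeping exclusive waiters behind all
 * non-exclusive ones is what lets __wake_up_common() below wake every
 * non-exclusive waiter while stopping after nr_exclusive exclusive ones.
 */
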
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

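/*
 * __wake_up_common() returns the remaining nr_exclusive budget; if it parked
 * the bookmark entry in the list (WQ_FLAG_BOOKMARK set), the walk was cut
 * short after WAITQUEUE_WALK_BREAK_CNT callbacks and the caller is expected
 * to drop the lock, reacquire it, and call again to resume from the bookmark,
 * as __wake_up_common_lock() below does.
 */
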
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

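/*
 * Illustrative pairing (a sketch, not code from this file): most callers
 * reach __wake_up() through the wake_up*() macros in <linux/wait.h>, paired
 * with a wait_event*() sleeper. 'my_wq' and 'done' are hypothetical names:
 *
 *	// sleeper				// waker
 *	wait_event(my_wq, done);		done = true;
 *						wake_up(&my_wq);
 */
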
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

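/*
 * The *_locked variants above are for callers that already hold
 * wq_head->lock, for instance (a sketch; the condition update is
 * caller-specific):
 *
 *	spin_lock_irqsave(&wq_head->lock, flags);
 *	// ... update the condition the waiters will re-test ...
 *	__wake_up_locked_key(&wq_head, TASK_NORMAL, key);
 *	spin_unlock_irqrestore(&wq_head->lock, flags);
 */
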
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

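/*
 * Canonical open-coded wait loop built from the primitives above (a sketch;
 * 'condition' stands for whatever expression the caller is waiting on):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */
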
void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

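/*
 * init_wait_entry() prepares the on-stack entry that the wait_event*()
 * machinery passes to prepare_to_wait_event() below; see the sketch after
 * that function for how the pieces fit together.
 */
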
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

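/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*() macros
 * in <linux/wait.h>. Roughly, the macro expansion drives it like this
 * (a sketch; 'condition', 'state' and 'exclusive' come from the macro
 * arguments):
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *		if (condition)
 *			break;
 *		if (err) {			// -ERESTARTSYS on signal
 *			ret = err;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */
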
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

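/*
 * do_wait_intr() and do_wait_intr_irq() exist to back the
 * wait_event_interruptible_locked*() family of macros, which enter with
 * wq->lock already held - hence the bare unlock/lock around schedule()
 * rather than the irqsave variants used elsewhere in this file.
 */
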
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

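/*
 * autoremove_wake_function() is the default wq_entry->func installed by
 * init_wait_entry() and the DEFINE_WAIT() macro: on a successful wakeup it
 * removes the entry from the queue, so the woken task need not take
 * wq_head->lock again just to dequeue itself - which is why finish_wait()
 * usually finds the list already empty.
 */
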
static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

389 | |
390 | /* | |
391 | * DEFINE_WAIT_FUNC(wait, woken_wake_func); | |
392 | * | |
9d9d676f | 393 | * add_wait_queue(&wq_head, &wait); |
61ada528 PZ |
394 | * for (;;) { |
395 | * if (condition) | |
396 | * break; | |
397 | * | |
398 | * p->state = mode; condition = true; | |
399 | * smp_mb(); // A smp_wmb(); // C | |
50816c48 | 400 | * if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN; |
61ada528 PZ |
401 | * schedule() try_to_wake_up(); |
402 | * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~ | |
50816c48 | 403 | * wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true; |
61ada528 | 404 | * smp_mb() // B smp_wmb(); // C |
50816c48 | 405 | * wq_entry->flags |= WQ_FLAG_WOKEN; |
61ada528 | 406 | * } |
9d9d676f | 407 | * remove_wait_queue(&wq_head, &wait); |
61ada528 PZ |
408 | * |
409 | */ | |
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

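/*
 * Concrete form of the sketch above (hypothetical 'condition'; this version
 * shows the timeout threading that the sketch omits):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(5000);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */
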
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply a write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);