/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

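/*
 * Example usage (illustrative sketch): a wait queue head is normally set up
 * either statically with DECLARE_WAIT_QUEUE_HEAD() or at runtime via the
 * init_waitqueue_head() wrapper around __init_waitqueue_head(). The
 * 'my_dev' structure below is hypothetical:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(global_wq);
 *
 *	struct my_dev {
 *		struct wait_queue_head	wq;
 *		bool			data_ready;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *	}
 */
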
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
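
/*
 * Example usage (illustrative sketch): add_wait_queue()/remove_wait_queue()
 * bracket a manually managed wait, typically with a custom wake function
 * such as woken_wake_function()/wait_woken() further below. 'my_wq' and
 * 'condition' are hypothetical:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	while (!condition) {
 *		if (signal_pending(current))
 *			break;
 *		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 *	}
 *	remove_wait_queue(&my_wq, &wait);
 */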


/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
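
/*
 * Example usage (illustrative sketch): the common wake_up*() helpers in
 * <linux/wait.h> are thin macros around __wake_up(), roughly:
 *
 *	wake_up(&wq)			== __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_all(&wq)		== __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)	== __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * so a hypothetical producer path looks like:
 *
 *	dev->data_ready = true;
 *	wake_up_interruptible(&dev->wq);
 */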

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
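
/*
 * Example usage (illustrative sketch): the _locked variants are for callers
 * that already hold wq_head->lock, e.g. when the waitqueue lock also covers
 * the wakeup condition. 'my_wq' and 'ready' are hypothetical:
 *
 *	spin_lock(&my_wq.lock);
 *	ready = true;
 *	wake_up_locked(&my_wq);		// wraps __wake_up_locked()
 *	spin_unlock(&my_wq.lock);
 */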

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
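
/*
 * Example usage (illustrative sketch): the sync variants are normally
 * reached through wake_up_interruptible_sync(), used when the waker knows
 * it is about to schedule away itself, so the woken task need not be pulled
 * to another CPU. 'dev' is hypothetical:
 *
 *	dev->reply_ready = true;
 *	wake_up_interruptible_sync(&dev->wq);
 *	// ... the waker blocks or exits shortly afterwards ...
 */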

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
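
/*
 * Example usage (illustrative sketch): the classic open-coded wait loop
 * built on prepare_to_wait()/finish_wait(). 'my_wq' and 'condition' are
 * hypothetical; the condition must be rechecked after prepare_to_wait()
 * so a wakeup that races with the check is not lost:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */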

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * that can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
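
/*
 * Sketch (illustrative): prepare_to_wait_event() is the backend of the
 * wait_event*() macro family; ___wait_event() in <linux/wait.h> expands to
 * roughly the following loop, with 'condition' being the caller's
 * expression:
 *
 *	for (;;) {
 *		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);
 *
 *		if (condition)
 *			break;
 *		if (__int) {			// -ERESTARTSYS on a pending signal
 *			__ret = __int;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &__wq_entry);
 */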

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
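
/*
 * Sketch (illustrative, assuming the "locked" wait_event variants in
 * <linux/wait.h>): do_wait_intr() and do_wait_intr_irq() back waits where
 * the caller holds wq->lock around both the condition check and the sleep.
 * 'my_wq' and 'condition' are hypothetical; roughly:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&my_wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&my_wq, &wait);	// drops and retakes the lock
 *		if (err)
 *			break;				// -ERESTARTSYS
 *	}
 *	__remove_wait_queue(&my_wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&my_wq.lock);
 */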

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPU's that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
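
/*
 * Note (illustrative): DEFINE_WAIT() and init_wait_entry() install
 * autoremove_wake_function() as ->func, so a successful wakeup also takes
 * the entry off the queue and finish_wait() usually finds ->entry already
 * empty, skipping wq_head->lock. A custom wake function can be supplied
 * instead, e.g. (hypothetical):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 */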

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;			    ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	condition = true;
 *     smp_mb(); // B				smp_wmb(); // C
 *						wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 *
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(); it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
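
/*
 * Example usage (illustrative sketch): the waker side that pairs with
 * wait_woken() above. Setting the condition before the wakeup, together
 * with the smp_wmb() in woken_wake_function(), ensures the sleeper sees the
 * condition once it observes WQ_FLAG_WOKEN. 'my_wq' and 'condition' are
 * hypothetical:
 *
 *	condition = true;
 *	wake_up_interruptible(&my_wq);	// invokes woken_wake_function() for
 *					// entries set up with DEFINE_WAIT_FUNC()
 */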

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);