// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock for this count by
 *		adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 that the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has the lock
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has the lock or attempts the lock). If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 */

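/*
 * Illustrative sketch (simplified; the real fastpaths live outside this
 * file): with the bias values above, down_read() and down_write() boil
 * down to a single atomic update plus a check, and only fall back to the
 * slowpaths in this file when that check fails:
 *
 *	down_read:	if (atomic_long_inc_return_acquire(&sem->count) <= 0)
 *				rwsem_down_read_failed(sem);
 *
 *	down_write:	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 *							     &sem->count);
 *			if (tmp != RWSEM_ACTIVE_WRITE_BIAS)
 *				rwsem_down_write_failed(sem);
 */
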
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

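/*
 * Usage sketch (illustrative only; "my_sem" is a made-up name, not part of
 * this file). An rwsem is normally set up with DECLARE_RWSEM() or
 * init_rwsem() and then used as a sleeping reader/writer lock:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);
 *	... shared (read-side) critical section, may sleep ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);
 *	... exclusive (write-side) critical section, may sleep ...
 *	up_write(&my_sem);
 *
 * The functions below only implement the contended slowpaths behind those
 * calls; the uncontended cases never reach this file.
 */
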
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark the writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}

	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	list_for_each_entry(waiter, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
	}
	list_cut_before(&wlist, &sem->wait_list, &waiter->list);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wake up.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add(wake_q, tsk);
		/* wake_q_add() already takes the task ref */
		put_task_struct(tsk);
	}
}

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
		 * been set in the count.
		 */
		if (atomic_long_read(&sem->count) >= 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
		adjustment += RWSEM_WAITING_BIAS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

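/*
 * Illustrative sketch (simplified; the real caller lives outside this
 * file): the killable read fastpath layers on top of the slowpath above
 * roughly as follows, so a fatal signal received while sleeping surfaces
 * as -EINTR to down_read_killable() callers:
 *
 *	if (atomic_long_inc_return_acquire(&sem->count) <= 0) {
 *		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 *			return -EINTR;
 *	}
 *	return 0;
 */
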
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire the write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire the write lock before the writer has been put on the wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the task is
	 * not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched is set, the owner is not
		 * running, or the owner's CPU is preempted.
		 */
		if (need_resched() || !owner_on_cpu(owner)) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

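/*
 * Illustrative sketch (simplified; the real unlock fastpaths live outside
 * this file): rwsem_wake() below is only reached when dropping the last
 * active lock leaves the count negative, i.e. there are queued waiters:
 *
 *	up_read:	tmp = atomic_long_dec_return_release(&sem->count);
 *			if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
 *				rwsem_wake(sem);
 *
 *	up_write:	if (atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 *							   &sem->count) < 0)
 *				rwsem_wake(sem);
 */
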
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *	- VS -
	 *
	 *		__up_write()
	 *		  if (atomic_long_sub_return_release(&sem->count) < 0)
	 *		    rwsem_wake(sem)
	 *		      osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1		[RmW] r0 = (Y += 0)
	 *	   MB			      RMB
	 * [RmW]   Y += 1		[L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */
	smp_rmb();

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do the wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);

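/*
 * Illustrative sketch (simplified; the real downgrade_write() fastpath
 * lives outside this file): the caller converts its write bias into a
 * read bias in one atomic step and only enters rwsem_downgrade_wake()
 * above when the resulting count is still negative, i.e. waiters are
 * queued behind it:
 *
 *	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
 *	if (tmp < 0)
 *		rwsem_downgrade_wake(sem);
 *
 * Note that -RWSEM_WAITING_BIAS == RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_WRITE_BIAS,
 * so this is exactly "give up the write bias, keep one read bias".
 */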