// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"

/*
 * The least significant 2 bits of the owner value have the following
 * meanings:
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it also puts its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field is largely left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Reader optimistic lock stealing in the fast path is supported when
 * the rwsem was previously owned by a writer and the following
 * conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff isn't set.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_NONSPINNABLE	(1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
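
/*
 * Example owner-field encodings (illustrative values only, with T being
 * a task_struct pointer):
 *
 *	T		- write-locked by task T
 *	T | 0x1		- reader-owned; T was the last reader to acquire
 *	T | 0x3		- reader-owned and nonspinnable
 *	0x0		- cleared by a writer unlock
 */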

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers		-- set, clear
 * 2) rwsem_try_write_lock() for writers	-- set, clear
 * 3) rwsem_del_waiter()			-- clear
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of the handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
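
/*
 * Example count encodings on a 64-bit architecture (illustrative values):
 *
 *	0x0000000000000000	- unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x0000000000000001	- write-locked, no waiters
 *	0x0000000000000300	- read-locked by three readers
 *	0x0000000000000102	- read-locked by one reader, with waiters
 */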

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(*cntp < 0))
		rwsem_set_nonspinnable(sem);

	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
		rwsem_set_reader_owned(sem);
		return true;
	}

	return false;
}

static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
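
/*
 * Illustrative fast-path transitions (count values per the definitions
 * above):
 *
 *	rwsem_read_trylock():  count += RWSEM_READER_BIAS
 *			       0x000 -> 0x100	(first reader)
 *	rwsem_write_trylock(): cmpxchg(0x000 -> RWSEM_WRITER_LOCKED)
 *			       0x000 -> 0x001	(uncontended writer)
 *
 * Any nonzero count (readers, waiters or handoff) makes the writer
 * cmpxchg fail, so the trylock returns false.
 */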

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
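
/*
 * Typical initialization, sketched for illustration (the names below are
 * hypothetical): a file-scope rwsem can use the DECLARE_RWSEM()
 * initializer from <linux/rwsem.h>, while an embedded one is set up at
 * runtime with init_rwsem(), which supplies the lock_class_key for
 * __init_rwsem():
 *
 *	static DECLARE_RWSEM(foo_rwsem);	// hypothetical static rwsem
 *
 *	struct foo {				// hypothetical container
 *		struct rw_semaphore sem;
 *	};
 *	...
 *	init_rwsem(&p->sem);			// p is a struct foo *
 */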

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;

	/* Writer only, not initialized in reader */
	bool handoff_set;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
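
/*
 * Worked example: with HZ=1000, DIV_ROUND_UP(1000, 250) = 4 jiffies, i.e.
 * 4ms; with HZ=250 it is 1 jiffy = 4ms; with HZ=100 it rounds up to
 * 1 jiffy = 10ms, the closest the coarser tick can get to the 4ms target.
 */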

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100

static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_add_tail(&waiter->list, &sem->wait_list);
	/* caller will set RWSEM_FLAG_WAITERS */
}

/*
 * Remove a waiter from the wait_list and clear flags.
 *
 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
 * this function. Modify with care.
 */
static inline void
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_del(&waiter->list);
	if (likely(!list_empty(&sem->wait_list)))
		return;

	atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
}

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 *
 * Implies rwsem_del_waiter() for all woken readers.
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that the woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (woken >= MAX_READERS_WAKEUP)
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);

	oldcount = atomic_long_read(&sem->count);
	if (list_empty(&sem->wait_list)) {
		/*
		 * Combined with list_move_tail() above, this implies
		 * rwsem_del_waiter().
		 */
		adjustment -= RWSEM_FLAG_WAITERS;
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	} else if (woken) {
		/*
		 * When we've woken a reader, we no longer need to force
		 * writers to give up the lock and we can clear HANDOFF.
		 */
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}
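
/*
 * Worked example of the accounting above, as a sketch: assume three
 * readers are queued and rwsem_mark_wake() is called with
 * wake_type == RWSEM_WAKE_ANY after a writer unlock. The first-grant
 * fetch_add sets adjustment = RWSEM_READER_BIAS, the scan then counts
 * woken = 3, so adjustment = 3 * RWSEM_READER_BIAS - RWSEM_READER_BIAS
 * and the count ends up holding exactly three reader biases before any
 * of the three tasks is actually woken.
 */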

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * Implies rwsem_del_waiter() on success.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					struct rwsem_waiter *waiter)
{
	bool first = rwsem_first_waiter(sem) == waiter;
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff) {
			if (!first)
				return false;

			/* First waiter inherits a previously set handoff bit */
			waiter->handoff_set = true;
		}

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (!rt_task(waiter->task) &&
					    !time_after(jiffies, waiter->timeout)))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF) {
		waiter->handoff_set = true;
		lockevent_inc(rwsem_wlock_handoff);
		return false;
	}

	/*
	 * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
	 * success.
	 */
	list_del(&waiter->list);
	rwsem_set_owner(sem);
	return true;
}
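
/*
 * Illustrative handoff sequence (a sketch of the state machine above):
 * a first-queued writer whose waiter->timeout has expired finds
 * RWSEM_LOCK_MASK still set, so the cmpxchg installs RWSEM_FLAG_HANDOFF
 * instead of acquiring. Readers and optimistic spinners then back off,
 * and once the lock bits clear, the same first waiter acquires with
 * RWSEM_WRITER_LOCKED while clearing the handoff bit in one cmpxchg.
 */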

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER

/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_lock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	rcu_read_lock();
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	rcu_read_unlock();
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
	if (flags & RWSEM_NONSPINNABLE)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	rcu_read_lock();
	for (;;) {
		/*
		 * When a waiting writer sets the handoff flag, it may spin
		 * on the owner as well. Once that writer acquires the lock,
		 * we can spin on it. So we don't need to quit even when the
		 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking sem->owner still matches owner, if that fails,
		 * owner might point to free()d memory, if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return state;
}

/*
 * Calculate the reader-owned rwsem spinning threshold for a writer.
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
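
/*
 * Worked example: with 10 readers, delta = (20 + 10) * NSEC_PER_USEC / 2,
 * i.e. a 15us spinning budget; the reader count is clamped at 30, which
 * yields the 25us maximum noted above.
 */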

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = rwsem_try_write_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (owner_state == OWNER_READER) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * a problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the
		 * RT task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * quit spinning.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

/*
 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
	if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
		atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	return OWNER_NONSPINNABLE;
}
#endif

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
	long adjustment = -RWSEM_READER_BIAS;
	long rcnt = (count >> RWSEM_READER_SHIFT);
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool wake = false;

	/*
	 * To prevent a constant stream of readers from starving a sleeping
	 * waiter, don't attempt optimistic lock stealing if the lock is
	 * currently owned by readers.
	 */
	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
		goto queue;

	/*
	 * Reader optimistic lock stealing.
	 */
	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_rlock_steal);

		/*
		 * Wake up other readers in the wait queue if it is
		 * the first reader.
		 */
		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer or has the handoff bit set, this reader can
		 * exit the slowpath and return immediately as its
		 * RWSEM_READER_BIAS has already been set in the count.
		 */
		if (!(atomic_long_read(&sem->count) &
		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
			/* Provide lock ACQUIRE */
			smp_acquire__after_ctrl_dep();
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK)) {
		clear_nonspinnable(sem);
		wake = true;
	}
	if (wake || (!(count & RWSEM_WRITER_MASK) &&
		    (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	rwsem_del_waiter(sem, &waiter);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
	waiter.handoff_set = false;

	raw_spin_lock_irq(&sem->wait_lock);
	rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock */
	if (rwsem_first_waiter(sem) != &waiter) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and:
		 *  1) there are no active locks, wake the front
		 *     queued process(es) as the handoff bit might be set.
		 *  2) there are no active writers and some readers, the lock
		 *     must be read owned; so we try to wake any read lock
		 *     waiters that were queued ahead of us.
		 */
		if (count & RWSEM_WRITER_MASK)
			goto wait;

		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
					? RWSEM_WAKE_READERS
					: RWSEM_WAKE_ANY, &wake_q);

		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			wake_q_init(&wake_q);	/* Used again, reinit */
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	for (;;) {
		if (rwsem_try_write_lock(sem, &waiter)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		if (signal_pending_state(state, current))
			goto out_nolock;

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-CPU writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (waiter.handoff_set) {
			enum owner_state owner_state;

			preempt_disable();
			owner_state = rwsem_spin_on_owner(sem);
			preempt_enable();

			if (owner_state == OWNER_NULL)
				goto trylock_again;
		}

		schedule();
		lockevent_inc(rwsem_sleep_writer);
		set_current_state(state);
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);
	return sem;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	rwsem_del_waiter(sem, &waiter);
	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
	return 0;
}

static inline void __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	tmp = RWSEM_UNLOCKED_VALUE;
	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}

/*
 * lock for writing
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			return -EINTR;
	}

	return 0;
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	return rwsem_write_trylock(sem);
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_nonspinnable(sem);
		rwsem_wake(sem);
	}
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}
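
/*
 * Count transition for an uncontended downgrade, as a sketch: the single
 * atomic above turns a write-locked count of 0x1 into 0x100 (one reader
 * bias, writer bit clear), so the lock never appears free in between and
 * no writer can sneak in.
 */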

#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state)	\
	set_current_state(state)

#define rwbase_restore_current_state()			\
	__set_current_state(TASK_RUNNING)

#define rwbase_rtmutex_lock_state(rtm, state)		\
	__rt_mutex_lock(rtm, state)

#define rwbase_rtmutex_slowlock_locked(rtm, state)	\
	__rt_mutex_slowlock_locked(rtm, NULL, state)

#define rwbase_rtmutex_unlock(rtm)			\
	__rt_mutex_unlock(rtm)

#define rwbase_rtmutex_trylock(rtm)			\
	__rt_mutex_trylock(rtm)

#define rwbase_signal_pending_state(state, current)	\
	signal_pending_state(state, current)

#define rwbase_schedule()				\
	schedule()

#include "rwbase_rt.c"

void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
	init_rwbase_rt(&(sem)->rwbase);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

static inline void __down_read(struct rw_semaphore *sem)
{
	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	return rwbase_read_trylock(&sem->rwbase);
}

static inline void __up_read(struct rw_semaphore *sem)
{
	rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}

static inline void __sched __down_write(struct rw_semaphore *sem)
{
	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	return rwbase_write_trylock(&sem->rwbase);
}

static inline void __up_write(struct rw_semaphore *sem)
{
	rwbase_write_unlock(&sem->rwbase);
}

static inline void __downgrade_write(struct rw_semaphore *sem)
{
	rwbase_write_downgrade(&sem->rwbase);
}

/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
	int count = atomic_read(&sem->rwbase.readers);

	return count < 0 && count != READER_BIAS;
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_interruptible(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
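
/*
 * Minimal usage sketch of the public API (the lock name below is
 * hypothetical, shown for illustration only):
 *
 *	static DECLARE_RWSEM(foo_rwsem);
 *
 *	down_write(&foo_rwsem);
 *	// ...modify the protected data...
 *	downgrade_write(&foo_rwsem);
 *	// ...keep reading with no window for another writer...
 *	up_read(&foo_rwsem);	// a downgraded lock is released with
 *				// up_read(), not up_write()
 */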

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif