/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
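/*
 * Typical usage, a minimal sketch (my_lock and my_count are hypothetical
 * names, not part of this file):
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static long my_count;
 *
 *	void my_count_inc(void)
 *	{
 *		mutex_lock(&my_lock);
 *		my_count++;
 *		mutex_unlock(&my_lock);
 *	}
 */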
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg(&lock->count, 1, 0) == 1);
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif
__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
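/*
 * For reference, the usual acquire/backoff cycle around these w/w
 * primitives, a minimal sketch (my_ww_class, obj_a and obj_b are
 * hypothetical names; see Documentation/locking/ww-mutex-design.txt for
 * the full pattern):
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		(unlock everything held so far, then sleep on the contended
 *		 lock with ww_mutex_lock_slow() and retry in the new order)
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_acquire_fini(&ctx);
 */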
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
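/*
 * The wrap-safe subtraction above reads as "ctx is younger than hold_ctx".
 * A worked example with hypothetical stamp values (stamps are unsigned
 * long; a lower stamp means an older, higher-priority context):
 *
 *	hold_ctx->stamp = 5, ctx->stamp = 9:
 *		9 - 5 = 4 <= LONG_MAX and the stamps differ, so ctx is the
 *		younger context and must back off with -EDEADLK.
 *	hold_ctx->stamp = 9, ctx->stamp = 5:
 *		5 - 9 wraps to a value > LONG_MAX, so ctx is the older
 *		context and may keep waiting for the lock.
 *
 * Equal stamps are tie-broken by the context pointer comparison.
 */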
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
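/*
 * Typical use of the _nested annotation, a minimal sketch (parent and
 * child are hypothetical objects whose locks share a lock class): when
 * lockdep is enabled, taking two locks of the same class requires telling
 * lockdep the nesting is intentional:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */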
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2; /* grow the interval ~3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;

	/*
	 * As a performance measure, release the lock before doing the other
	 * wakeup related duties that follow. This allows other tasks to
	 * acquire the lock sooner, while still handling cleanups in past
	 * unlock calls. This can be done as we do not enforce strict
	 * equivalence between the mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
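/*
 * Typical usage, a minimal sketch (my_lock is a hypothetical name). Note
 * that when the call fails, the lock is *not* held and must not be
 * unlocked:
 *
 *	if (mutex_lock_interruptible(&my_lock))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	mutex_unlock(&my_lock);
 */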
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}
static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
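/*
 * Typical usage, a minimal sketch (my_lock is a hypothetical name). Note
 * the spin_trylock()-style return convention documented above: nonzero
 * means the lock was taken:
 *
 *	if (mutex_trylock(&my_lock)) {
 *		... short critical section ...
 *		mutex_unlock(&my_lock);
 *	} else {
 *		... contended: fall back, do not unlock ...
 *	}
 */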
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
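/*
 * Typical usage is the put-path of a refcounted object, a minimal sketch
 * (obj, its refcnt/node fields and obj_list_lock are hypothetical names):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */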