// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_long_set(&lock->owner, 0);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
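
/*
 * Example usage (illustrative sketch, not part of the original file):
 * callers normally initialize a mutex with the mutex_init() wrapper or
 * define one statically with DEFINE_MUTEX(), rather than calling
 * __mutex_init() directly. The foo type below is a hypothetical name.
 *
 *	static DEFINE_MUTEX(example_mutex);	// statically defined
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);		// runtime initialization
 *	}
 */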

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
        return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
        return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
        unsigned long owner, curr = (unsigned long)current;

        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
                unsigned long old, flags = __owner_flags(owner);
                unsigned long task = owner & ~MUTEX_FLAGS;

                if (task) {
                        if (likely(task != curr))
                                break;

                        if (likely(!(flags & MUTEX_FLAG_PICKUP)))
                                break;

                        flags &= ~MUTEX_FLAG_PICKUP;
                } else {
#ifdef CONFIG_DEBUG_MUTEXES
                        DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
                }

                /*
                 * When we set the HANDOFF bit, we must make sure it doesn't
                 * live past the point where we acquire the lock. This would
                 * be possible if we (accidentally) set the bit on an
                 * unlocked mutex.
                 */
                flags &= ~MUTEX_FLAG_HANDOFF;

                old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
                if (old == owner)
                        return NULL;

                owner = old;
        }

        return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
        return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;
        unsigned long zero = 0UL;

        if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
                return true;

        return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;

        if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
                return true;

        return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
        return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                   struct list_head *list)
{
        debug_mutex_add_waiter(lock, waiter, current);

        list_add_tail(&waiter->list, list);
        if (__mutex_waiter_is_first(lock, waiter))
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
        unsigned long owner = atomic_long_read(&lock->owner);

        for (;;) {
                unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
                if (task)
                        new |= MUTEX_FLAG_PICKUP;

                old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
                if (old == owner)
                        break;

                owner = old;
        }
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();

        if (!__mutex_trylock_fast(lock))
                __mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
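
/*
 * Example usage (illustrative sketch, not part of the original file):
 * a mutex serializing access to shared data, released by the same task
 * that acquired it. counter_lock and counter are hypothetical names.
 *
 *	static DEFINE_MUTEX(counter_lock);
 *	static unsigned long counter;
 *
 *	static void counter_inc(void)
 *	{
 *		mutex_lock(&counter_lock);
 *		counter++;			// critical section, may sleep
 *		mutex_unlock(&counter_lock);
 *	}
 */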

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
        ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
        return (signed long)(a->stamp - b->stamp) > 0;
}
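
/*
 * Worked example (added for illustration): the signed subtraction makes the
 * comparison robust against stamp counter wraparound. With a->stamp == 3 and
 * b->stamp == 1, (signed long)(3 - 1) == 2 > 0, so @a is younger. With
 * a->stamp == 1 and b->stamp == ULONG_MAX (a wrapped counter),
 * (signed long)(1 - ULONG_MAX) == 2 > 0, so @a is still correctly seen as
 * younger even though its raw stamp value is numerically smaller.
 */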

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
               struct ww_acquire_ctx *ww_ctx)
{
        if (!ww_ctx->is_wait_die)
                return false;

        if (waiter->ww_ctx->acquired > 0 &&
            __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
                debug_mutex_wake_waiter(lock, waiter);
                wake_up_process(waiter->task);
        }

        return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even though multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx)
{
        struct task_struct *owner = __mutex_owner(lock);

        lockdep_assert_held(&lock->wait_lock);

        /*
         * Possible through __ww_mutex_add_waiter() when we race with
         * ww_mutex_set_context_fastpath(). In that case we'll get here again
         * through __ww_mutex_check_waiters().
         */
        if (!hold_ctx)
                return false;

        /*
         * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
         * it cannot go away because we'll have FLAG_WAITERS set and hold
         * wait_lock.
         */
        if (!owner)
                return false;

        if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
                hold_ctx->wounded = 1;

                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
                 * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
                        wake_up_process(owner);

                return true;
        }

        return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;

        lockdep_assert_held(&lock->wait_lock);

        list_for_each_entry(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_mutex_die(lock, cur, ww_ctx) ||
                    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
                        break;
        }
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_acquired(lock, ctx);

        /*
         * The lock->ctx update should be visible on all cores before
         * the WAITERS check is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or they will acquire wait_lock, add themselves
         * to the waiter list and sleep.
         */
        smp_mb(); /* See comments above and below. */

        /*
         * [W] ww->ctx = ctx          [W] MUTEX_FLAG_WAITERS
         *     MB                         MB
         * [R] MUTEX_FLAG_WAITERS     [R] ww->ctx
         *
         * The memory barrier above pairs with the memory barrier in
         * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
         * and/or !empty list.
         */
        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
                return;

        /*
         * Uh oh, we raced in fastpath, check if any of the waiters need to
         * die or wound us.
         */
        spin_lock(&lock->base.wait_lock);
        __ww_mutex_check_waiters(&lock->base, ctx);
        spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                            struct mutex_waiter *waiter)
{
        struct ww_mutex *ww;

        ww = container_of(lock, struct ww_mutex, base);

        /*
         * If ww->ctx is set, its contents are undefined; only by
         * acquiring wait_lock is there a guarantee that they are
         * valid when read.
         *
         * As such, when deadlock detection needs to be
         * performed the optimistic spinning cannot be done.
         *
         * Check this in every inner iteration because we may
         * be racing against another thread's ww_mutex_lock.
         */
        if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
                return false;

        /*
         * If we aren't on the wait list yet, cancel the spin
         * if there are waiters. We want to avoid stealing the
         * lock from a waiter with an earlier stamp, since the
         * other thread may already own a lock that we also
         * need.
         */
        if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
                return false;

        /*
         * Similarly, stop spinning if we are no longer the
         * first waiter.
         */
        if (waiter && !__mutex_waiter_is_first(lock, waiter))
                return false;

        return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
                         struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
        bool ret = true;

        rcu_read_lock();
        while (__mutex_owner(lock) == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Use vcpu_is_preempted to detect lock holder preemption issue.
                 */
                if (!owner->on_cpu || need_resched() ||
                    vcpu_is_preempted(task_cpu(owner))) {
                        ret = false;
                        break;
                }

                if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
                        ret = false;
                        break;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        if (need_resched())
                return 0;

        rcu_read_lock();
        owner = __mutex_owner(lock);

        /*
         * To guard against lock holder preemption, skip spinning if the
         * owner task is not running on a CPU or its CPU is preempted.
         */
        if (owner)
                retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
        rcu_read_unlock();

        /*
         * If lock->owner is not set, the mutex has been released. Return true
         * such that we'll trylock in the spin path, which is a faster option
         * than the blocking slow path.
         */
        return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
        if (!waiter) {
                /*
                 * The purpose of the mutex_can_spin_on_owner() function is
                 * to eliminate the overhead of osq_lock() and osq_unlock()
                 * in case spinning isn't possible. As a waiter-spinner
                 * is not going to take OSQ lock anyway, there is no need
                 * to call mutex_can_spin_on_owner().
                 */
                if (!mutex_can_spin_on_owner(lock))
                        goto fail;

                /*
                 * In order to avoid a stampede of mutex spinners trying to
                 * acquire the mutex all at once, the spinners need to take a
                 * MCS (queued) lock first before spinning on the owner field.
                 */
                if (!osq_lock(&lock->osq))
                        goto fail;
        }

        for (;;) {
                struct task_struct *owner;

                /* Try to acquire the mutex... */
                owner = __mutex_trylock_or_owner(lock);
                if (!owner)
                        break;

                /*
                 * There's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
                        goto fail_unlock;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }

        if (!waiter)
                osq_unlock(&lock->osq);

        return true;

fail_unlock:
        if (!waiter)
                osq_unlock(&lock->osq);

fail:
        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock the mutex. This avoids getting
         * scheduled out right after we obtained the mutex.
         */
        if (need_resched()) {
                /*
                 * We _should_ have TASK_RUNNING here, but just in case
                 * we do not, make it so, otherwise we might get stuck.
                 */
                __set_current_state(TASK_RUNNING);
                schedule_preempt_disabled();
        }

        return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
        return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
        if (__mutex_unlock_fast(lock))
                return;
#endif
        __mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

        mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
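
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * usual ww_mutex acquire pattern backs off and retries when ww_mutex_lock()
 * returns -EDEADLK. ww_class, obj_a and obj_b are hypothetical names, and
 * the full retry loop and error handling are elided for brevity.
 *
 *	static DEFINE_WW_CLASS(ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &ww_class);
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (ww_mutex_lock(&obj_b->lock, &ctx) == -EDEADLK) {
 *		// back off: drop what we hold, sleep on the contended
 *		// lock, then reacquire the rest in the new order
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		ww_mutex_lock(&obj_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_acquire_fini(&ctx);
 */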

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
                struct ww_mutex *ww;

                ww = container_of(lock, struct ww_mutex, base);
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
                ww_ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
                      struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct mutex_waiter *cur;

        if (ctx->acquired == 0)
                return 0;

        if (!ctx->is_wait_die) {
                if (ctx->wounded)
                        return __ww_mutex_kill(lock, ctx);

                return 0;
        }

        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
                return __ww_mutex_kill(lock, ctx);

        /*
         * If there is a waiter in front of us that has a context, then its
         * stamp is earlier than ours and we must kill ourselves.
         */
        cur = waiter;
        list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                return __ww_mutex_kill(lock, ctx);
        }

        return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
                      struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;
        struct list_head *pos;
        bool is_wait_die;

        if (!ww_ctx) {
                __mutex_add_waiter(lock, waiter, &lock->wait_list);
                return 0;
        }

        is_wait_die = ww_ctx->is_wait_die;

        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
         * them. Wait-Die waiters may die here. Wound-Wait waiters
         * never die here, but they are sorted in stamp order and
         * may wound the lock holder.
         */
        pos = &lock->wait_list;
        list_for_each_entry_reverse(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
                        /*
                         * Wait-Die: if we find an older context waiting, there
                         * is no point in queueing behind it, as we'd have to
                         * die the moment it would acquire the lock.
                         */
                        if (is_wait_die) {
                                int ret = __ww_mutex_kill(lock, ww_ctx);

                                if (ret)
                                        return ret;
                        }

                        break;
                }

                pos = &cur->list;

                /* Wait-Die: ensure younger waiters die. */
                __ww_mutex_die(lock, cur, ww_ctx);
        }

        __mutex_add_waiter(lock, waiter, pos);

        /*
         * Wound-Wait: if we're blocking on a mutex owned by a younger context,
         * wound it so that we might proceed.
         */
        if (!is_wait_die) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

                /*
                 * See ww_mutex_set_context_fastpath(). Orders setting
                 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
                 * such that either we or the fastpath will wound @ww->ctx.
                 */
                smp_mb();
                __ww_mutex_wound(lock, ww_ctx, ww->ctx);
        }

        return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct mutex_waiter waiter;
        bool first = false;
        struct ww_mutex *ww;
        int ret;

        might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

        ww = container_of(lock, struct ww_mutex, base);
        if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;

                /*
                 * Reset the wounded flag after a kill. No other process can
                 * race and wound us here since they can't have a valid owner
                 * pointer if we don't have any locks held.
                 */
                if (ww_ctx->acquired == 0)
                        ww_ctx->wounded = 0;
        }

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

        if (__mutex_trylock(lock) ||
            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (use_ww_ctx && ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
                preempt_enable();
                return 0;
        }

        spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
        if (__mutex_trylock(lock)) {
                if (use_ww_ctx && ww_ctx)
                        __ww_mutex_check_waiters(lock, ww_ctx);

                goto skip_wait;
        }

        debug_mutex_lock_common(lock, &waiter);

        lock_contended(&lock->dep_map, ip);

        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                __mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
                waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
        } else {
                /*
                 * Add in stamp order, waking up waiters that must kill
                 * themselves.
                 */
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
                        goto err_early_kill;

                waiter.ww_ctx = ww_ctx;
        }

        waiter.task = current;

        set_current_state(state);
        for (;;) {
                /*
                 * Once we hold wait_lock, we're serialized against
                 * mutex_unlock() handing the lock off to us, do a trylock
                 * before testing the error conditions to make sure we pick up
                 * the handoff.
                 */
                if (__mutex_trylock(lock))
                        goto acquired;

                /*
                 * Check for signals and kill conditions while holding
                 * wait_lock. This ensures the lock cancellation is ordered
                 * against mutex_unlock() and wake-ups do not go missing.
                 */
                if (signal_pending_state(state, current)) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx) {
                        ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
                }

                spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();

                /*
                 * ww_mutex needs to always recheck its position since its
                 * waiter list is not FIFO ordered.
                 */
                if ((use_ww_ctx && ww_ctx) || !first) {
                        first = __mutex_waiter_is_first(lock, &waiter);
                        if (first)
                                __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
                }

                set_current_state(state);
                /*
                 * Here we order against unlock; we must either see it change
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
                if (__mutex_trylock(lock) ||
                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;

                spin_lock(&lock->wait_lock);
        }
        spin_lock(&lock->wait_lock);
acquired:
        __set_current_state(TASK_RUNNING);

        if (use_ww_ctx && ww_ctx) {
                /*
                 * Wound-Wait; we stole the lock (!first_waiter), check the
                 * waiters as anyone might want to wound us.
                 */
                if (!ww_ctx->is_wait_die &&
                    !__mutex_waiter_is_first(lock, &waiter))
                        __ww_mutex_check_waiters(lock, ww_ctx);
        }

        mutex_remove_waiter(lock, &waiter, current);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);

        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);

        if (use_ww_ctx && ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);

        spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;

err:
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
        spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
             struct lockdep_map *nest_lock, unsigned long ip)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
                struct lockdep_map *nest_lock, unsigned long ip,
                struct ww_acquire_ctx *ww_ctx)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
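
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * subclass tells lockdep that taking two locks of the same class is
 * intentional and ordered, e.g. when locking a parent and a child object
 * of the same type (parent and child are hypothetical names).
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */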

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
        int token;

        might_sleep();

        token = io_schedule_prepare();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
        io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);

        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
        struct task_struct *next = NULL;
        DEFINE_WAKE_Q(wake_q);
        unsigned long owner;

        mutex_release(&lock->dep_map, 1, ip);

        /*
         * Release the lock before (potentially) taking the spinlock such that
         * other contenders can get on with things ASAP.
         *
         * Except when HANDOFF, in that case we must not clear the owner field,
         * but instead set it to the top waiter.
         */
        owner = atomic_long_read(&lock->owner);
        for (;;) {
                unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                if (owner & MUTEX_FLAG_HANDOFF)
                        break;

                old = atomic_long_cmpxchg_release(&lock->owner, owner,
                                                  __owner_flags(owner));
                if (old == owner) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                break;

                        return;
                }

                owner = old;
        }

        spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                        list_first_entry(&lock->wait_list,
                                         struct mutex_waiter, list);

                next = waiter->task;

                debug_mutex_wake_waiter(lock, waiter);
                wake_q_add(&wake_q, next);
        }

        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);

        spin_unlock(&lock->wait_lock);

        wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
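
/*
 * Example usage (illustrative sketch, not part of the original file):
 * callers must check the return value, since the lock may not have been
 * taken; dev is a hypothetical name.
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;	// interrupted by a signal
 *	...
 *	mutex_unlock(&dev->lock);
 */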

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
        int token;

        token = io_schedule_prepare();
        mutex_lock(lock);
        io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
                               _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
                               _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

        locked = __mutex_trylock(lock);
        if (locked)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return locked;
}
EXPORT_SYMBOL(mutex_trylock);
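
/*
 * Example usage (illustrative sketch, not part of the original file): note
 * the spin_trylock()-style return convention, 1 on success and 0 on
 * contention; stats_lock and update_stats() are hypothetical names.
 *
 *	if (mutex_trylock(&stats_lock)) {
 *		update_stats();
 *		mutex_unlock(&stats_lock);
 *	} else {
 *		// lock busy; skip the optional work instead of sleeping
 *	}
 */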

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true (1) and hold the lock if we decremented the counter to 0;
 * return false (0) otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
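
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * refcounted object whose teardown must be serialized against lookups;
 * obj, obj_list_lock and the node member are hypothetical names.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_mutex_lock(&o->refcount, &obj_list_lock)) {
 *			// refcount hit 0 and we hold obj_list_lock
 *			list_del(&o->node);
 *			mutex_unlock(&obj_list_lock);
 *			kfree(o);
 *		}
 *	}
 */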