/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()  0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed while the
 * mutex is still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
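
/*
 * For illustration only: a typical caller pattern for the API above might
 * look like the sketch below. The names my_dev_lock, hit_count and
 * my_dev_record_hit() are hypothetical. mutex_lock() may sleep, so this is
 * process context only, and the unlock must come from the same task.
 *
 *	static DEFINE_MUTEX(my_dev_lock);
 *	static unsigned long hit_count;
 *
 *	static void my_dev_record_hit(void)
 *	{
 *		mutex_lock(&my_dev_lock);
 *		hit_count++;
 *		mutex_unlock(&my_dev_lock);
 *	}
 */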

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
}
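
/*
 * Illustrative, simplified sketch of the acquire/backoff protocol that the
 * checks above enforce (not built; obj_a, obj_b, their ->lock members and
 * my_ww_class are hypothetical). On -EDEADLK the younger context drops
 * everything it holds, then waits for the contended lock with
 * ww_mutex_lock_slow() before retrying; a full implementation would loop
 * until no -EDEADLK is returned.
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	err = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (!err) {
 *		err = ww_mutex_lock(&obj_b->lock, &ctx);
 *		if (err == -EDEADLK) {
 *			ww_mutex_unlock(&obj_a->lock);
 *			ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *			err = ww_mutex_lock(&obj_a->lock, &ctx);
 *		}
 *	}
 *	ww_acquire_done(&ctx);
 *	... use the objects, ww_mutex_unlock() each one ...
 *	ww_acquire_fini(&ctx);
 */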

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set the context and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
        unsigned long flags;
        struct mutex_waiter *cur;

        ww_mutex_lock_acquired(lock, ctx);

        lock->ctx = ctx;

        /*
         * The lock->ctx update should be visible on all cores before
         * the atomic read is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or they will acquire the wait_lock, add
         * themselves to the wait list and sleep.
         */
        smp_mb(); /* ^^^ */

        /*
         * Check if lock is contended, if not there is nobody to wake up
         */
        if (likely(atomic_read(&lock->base.count) == 0))
                return;

        /*
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
        spin_lock_mutex(&lock->base.wait_lock, flags);
        list_for_each_entry(cur, &lock->base.wait_list, list) {
                debug_mutex_wake_waiter(&lock->base, cur);
                wake_up_process(cur->task);
        }
        spin_unlock_mutex(&lock->base.wait_lock, flags);
}


#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the
 * mutex more or less simultaneously, the spinners need to acquire an MCS
 * lock first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking that
         * lock->owner still matches owner. If that check fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                cpu_relax_lowlatency();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        if (need_resched())
                return 0;

        rcu_read_lock();
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the current holder may have just
         * acquired the mutex and not yet set the owner field, or the
         * mutex may already have been released.
         */
        return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
        return !mutex_is_locked(lock) &&
                (atomic_cmpxchg(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
                                  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;

        if (!mutex_can_spin_on_owner(lock))
                goto done;

        if (!osq_lock(&lock->osq))
                goto done;

        while (true) {
                struct task_struct *owner;

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;

                        ww = container_of(lock, struct ww_mutex, base);
                        /*
                         * If ww->ctx is set, its contents are undefined; only
                         * by holding the wait_lock is there a guarantee that
                         * they are valid when read.
                         *
                         * As such, when deadlock detection needs to be
                         * performed, optimistic spinning cannot be done.
                         */
                        if (ACCESS_ONCE(ww->ctx))
                                break;
                }

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                /* Try to acquire the mutex if it is unlocked. */
                if (mutex_try_to_acquire(lock)) {
                        lock_acquired(&lock->dep_map, ip);

                        if (use_ww_ctx) {
                                struct ww_mutex *ww;
                                ww = container_of(lock, struct ww_mutex, base);

                                ww_mutex_set_context_fastpath(ww, ww_ctx);
                        }

                        mutex_set_owner(lock);
                        osq_unlock(&lock->osq);
                        return true;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax_lowlatency();
        }

        osq_unlock(&lock->osq);
done:
        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock the mutex. This avoids getting
         * scheduled out right after we obtained the mutex.
         */
        if (need_resched())
                schedule_preempt_disabled();

        return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
                                  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner prematurely;
         * the slow path will always be taken, and it clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner prematurely;
         * the slow path will always be taken, and it clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(&lock->base);
#endif
        __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
                ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}
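
/*
 * Note on the stamp check above (illustrative, with made-up values): the
 * unsigned subtraction ctx->stamp - hold_ctx->stamp <= LONG_MAX is a
 * wraparound-safe way of asking "is ctx the same age or younger than the
 * holder's context?". With hold_ctx->stamp == 100 and ctx->stamp == 103,
 * the difference is 3, so the younger requester backs off with -EDEADLK;
 * with ctx->stamp == 97 the difference wraps to a value larger than
 * LONG_MAX, so the older requester keeps waiting. The pointer comparison
 * only breaks ties between identical stamps.
 */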

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
        int ret;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

        if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
                /* got the lock, yay! */
                preempt_enable();
                return 0;
        }

        spin_lock_mutex(&lock->wait_lock, flags);

        /*
         * Once more, try to acquire the lock. Only try-lock the mutex if
         * it is unlocked to reduce unnecessary xchg() operations.
         */
        if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters. We only attempt the xchg if the count is
                 * non-negative in order to avoid unnecessary xchg operations:
                 */
                if (atomic_read(&lock->count) >= 0 &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
                }

                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
        mutex_set_owner(lock);

        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;

                /*
                 * This branch gets optimized out for the common case,
                 * and is only important for ww_mutex_lock.
                 */
                ww_mutex_lock_acquired(ww, ww_ctx);
                ww->ctx = ww_ctx;

                /*
                 * Give any possible sleeping processes the chance to wake up,
                 * so they can recheck if they have to back off.
                 */
                list_for_each_entry(cur, &lock->wait_list, list) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable();
        return 0;

err:
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
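
/*
 * Illustrative sketch of why the _nested variant exists (not built; the
 * parent/child objects are hypothetical). When two locks of the same lockdep
 * class must be held at once, the inner acquisition is annotated with a
 * distinct subclass so lockdep does not report a false deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	... operate on both objects ...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */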

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2; /* grow the interval by ~3.5x */

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);

        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
        unsigned long flags;

        /*
         * As a performance measure, release the lock before doing the
         * wakeup related duties that follow. This allows other tasks to
         * acquire the lock sooner, while still handling cleanups in past
         * unlock calls. This can be done as we do not enforce strict
         * equivalence between the mutex counter and wait_list.
         *
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here - as the lock counter is currently 0 or negative.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
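
/*
 * For illustration only: a typical caller pattern for the interruptible
 * variant might look like the sketch below (my_data and my_data_lock are
 * hypothetical). Returning -ERESTARTSYS is the usual way for a syscall
 * path to react to the -EINTR result:
 *
 *	if (mutex_lock_interruptible(&my_data_lock))
 *		return -ERESTARTSYS;
 *	... access my_data ...
 *	mutex_unlock(&my_data_lock);
 */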

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                            NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        /* No need to trylock if the mutex is locked. */
        if (mutex_is_locked(lock))
                return 0;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
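
/*
 * For illustration only: because mutex_trylock() follows the spin_trylock()
 * convention, a caller checks for success, not failure (my_stats_lock and
 * the update are hypothetical):
 *
 *	if (mutex_trylock(&my_stats_lock)) {
 *		... update statistics opportunistically ...
 *		mutex_unlock(&my_stats_lock);
 *	}
 */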

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns 1, with the mutex held, if the decrement reached 0; returns 0
 * (without the mutex) otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
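
/*
 * For illustration only: the typical use is dropping a reference count where
 * the final put must tear an object down under a lock protecting the lookup
 * structure (my_obj, my_table_lock and my_table_remove() are hypothetical):
 *
 *	if (atomic_dec_and_mutex_lock(&my_obj->refcount, &my_table_lock)) {
 *		my_table_remove(my_obj);
 *		mutex_unlock(&my_table_lock);
 *		kfree(my_obj);
 *	}
 */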