locking/Documentation: Move locking related docs into Documentation/locking/

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6c17580f0bbb76d7c3ac9129330b9..7c98873a30777f131541a36889631c1efea79320 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -8,7 +8,7 @@
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
  *
- *  See Documentation/rt-mutex-design.txt for details.
+ *  See Documentation/locking/rt-mutex-design.txt for details.
  */
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       struct task_struct *owner = rt_mutex_owner(lock);
+
+       clear_rt_mutex_waiters(lock);
+       raw_spin_unlock(&lock->wait_lock);
+       /*
+        * If a new waiter comes in between the unlock and the cmpxchg
+        * we have two situations:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        * cmpxchg(p, owner, 0) == owner
+        *                                      mark_rt_mutex_waiters(lock);
+        *                                      acquire(lock);
+        * or:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      mark_rt_mutex_waiters(lock);
+        *
+        * cmpxchg(p, owner, 0) != owner
+        *                                      enqueue_waiter();
+        *                                      unlock(wait_lock);
+        * lock(wait_lock);
+        * wake waiter();
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      acquire(lock);
+        */
+       return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
 #else
 # define rt_mutex_cmpxchg(l,c,n)       (0)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
 }
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       lock->owner = NULL;
+       raw_spin_unlock(&lock->wait_lock);
+       return true;
+}
 #endif
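
The fastpath variant above works because the owner field doubles as the lock
word: the owner task pointer with RT_MUTEX_HAS_WAITERS folded into its low
bit. Below is a minimal user-space model of that protocol, assuming C11
atomics and a pthread mutex standing in for lock->wait_lock; every name in it
is illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

#define HAS_WAITERS 1UL                    /* low bit of the owner word */

static _Atomic unsigned long owner;        /* task cookie | HAS_WAITERS */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

/* Set the waiters bit with a cmpxchg loop, as mark_rt_mutex_waiters() does. */
static void mark_waiters(void)
{
        unsigned long o = atomic_load(&owner);

        while (!atomic_compare_exchange_weak(&owner, &o, o | HAS_WAITERS))
                ;                          /* o is reloaded on failure */
}

/* Steps 1-3 from the comment above: clear bit, drop wait_lock, cmpxchg. */
static bool unlock_safe(unsigned long me)
{
        unsigned long expected = me;       /* owner without waiters bit */

        atomic_store(&owner, me);          /* 1) clear the waiters bit  */
        pthread_mutex_unlock(&wait_lock);  /* 2) drop wait_lock         */
        /* 3) succeeds only if no waiter re-set the bit in between      */
        return atomic_compare_exchange_strong(&owner, &expected, 0UL);
}

/* The caller retries under wait_lock, as rt_mutex_slowunlock() does below. */
static void slow_unlock(unsigned long me)
{
        pthread_mutex_lock(&wait_lock);
        while (!(atomic_load(&owner) & HAS_WAITERS)) {
                if (unlock_safe(me))       /* drops wait_lock */
                        return;
                pthread_mutex_lock(&wait_lock);
        }
        /* waiters exist: wake the top waiter instead (omitted) */
        pthread_mutex_unlock(&wait_lock);
}

The cmpxchg in step 3 is what closes the race diagrammed in the fastpath
comment: a waiter that slips in after wait_lock is dropped re-sets
HAS_WAITERS, the compare fails, and the unlocker relocks wait_lock and
retries.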
 
 static inline int
@@ -255,42 +307,121 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
+/*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                         enum rtmutex_chainwalk chwalk)
+{
+       /*
+        * This is just a wrapper function for the following call,
+        * because debug_rt_mutex_detect_deadlock() smells like a magic
+        * debug feature and I wanted to keep the cond function in the
+        * main source file along with the comments instead of having
+        * two of the same in the headers.
+        */
+       return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
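
rt_mutex_cond_detect_deadlock() forwards to a per-config helper in the
headers. A sketch of the two variants, consistent with the rules spelled out
above (the real definitions live in rtmutex.h and rtmutex-debug.h and are
paraphrased here, not quoted):

#ifdef CONFIG_DEBUG_RT_MUTEXES
/* Debug build: walk the full chain whenever there is a waiter at all. */
static inline bool
debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
                               enum rtmutex_chainwalk chwalk)
{
        return waiter != NULL;
}
#else
/* Production build: only when the caller asked for a full chain walk. */
static inline bool
debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
                               enum rtmutex_chainwalk chwalk)
{
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
}
#endif

The deboost path works out in both builds: it passes waiter == NULL together
with RT_MUTEX_MIN_CHAINWALK, so either variant returns false.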
 /*
  * Max number of times we'll walk the boosting chain:
  */
 int max_lock_depth = 1024;
 
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+       return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  *
- * @task: the task owning the mutex (owner) for which a chain walk is probably
- *       needed
+ * @task:      the task owning the mutex (owner) for which a chain walk is
+ *             probably needed
  * @deadlock_detect: do we have to carry out deadlock detection?
- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
- *            things for a task that has just got its priority adjusted, and
- *            is waiting on a mutex)
+ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+ *             things for a task that has just got its priority adjusted, and
+ *             is waiting on a mutex)
+ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+ *             we dropped its pi_lock. Is never dereferenced, only used for
+ *             comparison to detect lock chain changes.
  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
- *              its priority to the mutex owner (can be NULL in the case
- *              depicted above or if the top waiter is gone away and we are
- *              actually deboosting the owner)
- * @top_task: the current top waiter
+ *             its priority to the mutex owner (can be NULL in the case
+ *             depicted above or if the top waiter is gone away and we are
+ *             actually deboosting the owner)
+ * @top_task:  the current top waiter
  *
  * Returns 0 or -EDEADLK.
+ *
+ * Chain walk basics and protection scope
+ *
+ * [R] refcount on task
+ * [P] task->pi_lock held
+ * [L] rtmutex->wait_lock held
+ *
+ * Step        Description                             Protected by
+ *     function arguments:
+ *     @task                                   [R]
+ *     @orig_lock if != NULL                   @top_task is blocked on it
+ *     @next_lock                              Unprotected. Cannot be
+ *                                             dereferenced. Only used for
+ *                                             comparison.
+ *     @orig_waiter if != NULL                 @top_task is blocked on it
+ *     @top_task                               current, or in case of proxy
+ *                                             locking protected by calling
+ *                                             code
+ *     again:
+ *       loop_sanity_check();
+ *     retry:
+ * [1]   lock(task->pi_lock);                  [R] acquire [P]
+ * [2]   waiter = task->pi_blocked_on;         [P]
+ * [3]   check_exit_conditions_1();            [P]
+ * [4]   lock = waiter->lock;                  [P]
+ * [5]   if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
+ *         unlock(task->pi_lock);              release [P]
+ *         goto retry;
+ *       }
+ * [6]   check_exit_conditions_2();            [P] + [L]
+ * [7]   requeue_lock_waiter(lock, waiter);    [P] + [L]
+ * [8]   unlock(task->pi_lock);                release [P]
+ *       put_task_struct(task);                release [R]
+ * [9]   check_exit_conditions_3();            [L]
+ * [10]          task = owner(lock);                   [L]
+ *       get_task_struct(task);                [L] acquire [R]
+ *       lock(task->pi_lock);                  [L] acquire [P]
+ * [11]          requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
+ * [12]          check_exit_conditions_4();            [P] + [L]
+ * [13]          unlock(task->pi_lock);                release [P]
+ *       unlock(lock->wait_lock);              release [L]
+ *       goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-                                     int deadlock_detect,
+                                     enum rtmutex_chainwalk chwalk,
                                      struct rt_mutex *orig_lock,
+                                     struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
 {
-       struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
-       int detect_deadlock, ret = 0, depth = 0;
+       struct rt_mutex_waiter *prerequeue_top_waiter;
+       int ret = 0, depth = 0;
+       struct rt_mutex *lock;
+       bool detect_deadlock;
        unsigned long flags;
+       bool requeue = true;
 
-       detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-                                                        deadlock_detect);
+       detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
        /*
         * The (de)boosting is a step by step approach with a lot of
@@ -299,6 +430,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * carefully whether things change under us.
         */
  again:
+       /*
+        * We limit the lock chain length for each invocation.
+        */
        if (++depth > max_lock_depth) {
                static int prev_max;
 
@@ -314,15 +448,30 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                }
                put_task_struct(task);
 
-               return deadlock_detect ? -EDEADLK : 0;
+               return -EDEADLK;
        }
+
+       /*
+        * We are fully preemptible here and only hold the refcount on
+        * @task. So everything can have changed under us since the
+        * caller or our own code below (goto retry/again) dropped all
+        * locks.
+        */
  retry:
        /*
-        * Task can not go away as we did a get_task() before !
+        * [1] Task cannot go away as we did a get_task() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+       /*
+        * [2] Get the waiter on which @task is blocked.
+        */
        waiter = task->pi_blocked_on;
+
+       /*
+        * [3] check_exit_conditions_1() protected by task->pi_lock.
+        */
+
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
@@ -338,6 +487,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;
 
+       /*
+        * We dropped all locks after taking a refcount on @task, so
+        * the task might have moved on in the lock chain or even left
+        * the chain completely and blocks now on an unrelated lock or
+        * on @orig_lock.
+        *
+        * We stored the lock on which @task was blocked in @next_lock,
+        * so we can detect the chain change.
+        */
+       if (next_lock != waiter->lock)
+               goto out_unlock_pi;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -348,20 +509,41 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
-                * are not the top pi waiter of the task.
+                * are not the top pi waiter of the task. If deadlock
+                * detection is enabled, we continue, but stop the
+                * requeueing in the chain walk.
                 */
-               if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-                       goto out_unlock_pi;
+               if (top_waiter != task_top_pi_waiter(task)) {
+                       if (!detect_deadlock)
+                               goto out_unlock_pi;
+                       else
+                               requeue = false;
+               }
        }
 
        /*
-        * When deadlock detection is off then we check, if further
-        * priority adjustment is necessary.
+        * If the waiter priority is the same as the task priority
+        * then there is no further priority adjustment necessary.  If
+        * deadlock detection is off, we stop the chain walk. If it's
+        * enabled, we continue, but stop the requeueing in the chain
+        * walk.
         */
-       if (!detect_deadlock && waiter->prio == task->prio)
-               goto out_unlock_pi;
+       if (waiter->prio == task->prio) {
+               if (!detect_deadlock)
+                       goto out_unlock_pi;
+               else
+                       requeue = false;
+       }
 
+       /*
+        * [4] Get the next lock
+        */
        lock = waiter->lock;
+       /*
+        * [5] We need to trylock here as we are holding task->pi_lock,
+        * which is the reverse lock order versus the other rtmutex
+        * operations.
+        */
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
@@ -369,64 +551,180 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        }
 
        /*
+        * [6] check_exit_conditions_2() protected by task->pi_lock and
+        * lock->wait_lock.
+        *
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-               debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+               debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
-               ret = deadlock_detect ? -EDEADLK : 0;
+               ret = -EDEADLK;
                goto out_unlock_pi;
        }
 
-       top_waiter = rt_mutex_top_waiter(lock);
+       /*
+        * If we just follow the lock chain for deadlock detection, no
+        * need to do all the requeue operations. To avoid a truckload
+        * of conditionals around the various places below, just do the
+        * minimum chain walk checks.
+        */
+       if (!requeue) {
+               /*
+                * No requeue [7] here. Just release @task [8].
+                */
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               put_task_struct(task);
+
+               /*
+                * [9] check_exit_conditions_3 protected by lock->wait_lock.
+                * If there is no owner of the lock, end of chain.
+                */
+               if (!rt_mutex_owner(lock)) {
+                       raw_spin_unlock(&lock->wait_lock);
+                       return 0;
+               }
+
+               /* [10] Grab the next task, i.e. owner of @lock */
+               task = rt_mutex_owner(lock);
+               get_task_struct(task);
+               raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+               /*
+                * No requeue [11] here. We just do deadlock detection.
+                *
+                * [12] Store whether owner is blocked itself.
+                * The decision is made after dropping the locks.
+                */
+               next_lock = task_blocked_on_lock(task);
+               /*
+                * Get the top waiter for the next iteration
+                */
+               top_waiter = rt_mutex_top_waiter(lock);
+
+               /* [13] Drop locks */
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&lock->wait_lock);
+
+               /* If owner is not blocked, end of chain. */
+               if (!next_lock)
+                       goto out_put_task;
+               goto again;
+       }
+
+       /*
+        * Store the current top waiter before doing the requeue
+        * operation on @lock. We need it for the boost/deboost
+        * decision below.
+        */
+       prerequeue_top_waiter = rt_mutex_top_waiter(lock);
 
-       /* Requeue the waiter */
+       /* [7] Requeue the waiter in the lock waiter list. */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);
 
-       /* Release the task */
+       /* [8] Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       put_task_struct(task);
+
+       /*
+        * [9] check_exit_conditions_3 protected by lock->wait_lock.
+        *
+        * We must abort the chain walk if there is no lock owner even
+        * in the deadlock detection case, as we have nothing to
+        * follow here. This is the end of the chain we are walking.
+        */
        if (!rt_mutex_owner(lock)) {
                /*
-                * If the requeue above changed the top waiter, then we need
-                * to wake the new top waiter up to try to get the lock.
+                * If the requeue [7] above changed the top waiter,
+                * then we need to wake the new top waiter up to try
+                * to get the lock.
                 */
-
-               if (top_waiter != rt_mutex_top_waiter(lock))
+               if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
-               goto out_put_task;
+               return 0;
        }
-       put_task_struct(task);
 
-       /* Grab the next task */
+       /* [10] Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+       /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
-               /* Boost the owner */
-               rt_mutex_dequeue_pi(task, top_waiter);
+               /*
+                * The waiter became the new top (highest priority)
+                * waiter on the lock. Replace the previous top waiter
+                * in the owner task's pi waiters list with this waiter
+                * and adjust the priority of the owner.
+                */
+               rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
 
-       } else if (top_waiter == waiter) {
-               /* Deboost the owner */
+       } else if (prerequeue_top_waiter == waiter) {
+               /*
+                * The waiter was the top waiter on the lock, but is
+                * no longer the top priority waiter. Replace waiter in
+                * the owner task's pi waiters list with the new top
+                * (highest priority) waiter and adjust the priority
+                * of the owner.
+                * The new top waiter is stored in @waiter so that
+                * @waiter == @top_waiter evaluates to true below and
+                * we continue to deboost the rest of the chain.
+                */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
+       } else {
+               /*
+                * Nothing changed. No need to do any priority
+                * adjustment.
+                */
        }
 
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
+       /*
+        * [12] check_exit_conditions_4() protected by task->pi_lock
+        * and lock->wait_lock. The actual decisions are made after we
+        * dropped the locks.
+        *
+        * Check whether the task which owns the current lock is pi
+        * blocked itself. If yes we store a pointer to the lock for
+        * the lock chain change detection above. After we dropped
+        * task->pi_lock next_lock cannot be dereferenced anymore.
+        */
+       next_lock = task_blocked_on_lock(task);
+       /*
+        * Store the top waiter of @lock for the end of chain walk
+        * decision below.
+        */
        top_waiter = rt_mutex_top_waiter(lock);
+
+       /* [13] Drop the locks */
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        raw_spin_unlock(&lock->wait_lock);
 
+       /*
+        * Make the actual exit decisions [12], based on the stored
+        * values.
+        *
+        * We reached the end of the lock chain. Stop right here. No
+        * point to go back just to figure that out.
+        */
+       if (!next_lock)
+               goto out_put_task;
+
+       /*
+        * If the current waiter is not the top waiter on the lock,
+        * then we can stop the chain walk here if we are not in full
+        * deadlock detection mode.
+        */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;
 
@@ -445,76 +743,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *         callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-               struct rt_mutex_waiter *waiter)
+                               struct rt_mutex_waiter *waiter)
 {
+       unsigned long flags;
+
        /*
-        * We have to be careful here if the atomic speedups are
-        * enabled, such that, when
-        *  - no other waiter is on the lock
-        *  - the lock has been released since we did the cmpxchg
-        * the lock can be released or taken while we are doing the
-        * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+        * Before testing whether we can acquire @lock, we set the
+        * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+        * other tasks which try to modify @lock into the slow path
+        * and they serialize on @lock->wait_lock.
+        *
+        * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+        * as explained at the top of this file if and only if:
         *
-        * The atomic acquire/release aware variant of
-        * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-        * the WAITERS bit, the atomic release / acquire can not
-        * happen anymore and lock->wait_lock protects us from the
-        * non-atomic case.
+        * - There is a lock owner. The caller must fixup the
+        *   transient state if it does a trylock or leaves the lock
+        *   function due to a signal or timeout.
         *
-        * Note, that this might set lock->owner =
-        * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-        * any more. This is fixed up when we take the ownership.
-        * This is the transitional state explained at the top of this file.
+        * - @task acquires the lock and there are no other
+        *   waiters. This is undone in rt_mutex_set_owner(@task) at
+        *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);
 
+       /*
+        * If @lock has an owner, give up.
+        */
        if (rt_mutex_owner(lock))
                return 0;
 
        /*
-        * It will get the lock because of one of these conditions:
-        * 1) there is no waiter
-        * 2) higher priority than waiters
-        * 3) it is top waiter
+        * If @waiter != NULL, @task has already enqueued the waiter
+        * into @lock's waiter list. If @waiter == NULL then this is a
+        * trylock attempt.
         */
-       if (rt_mutex_has_waiters(lock)) {
-               if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-                       if (!waiter || waiter != rt_mutex_top_waiter(lock))
-                               return 0;
-               }
-       }
-
-       if (waiter || rt_mutex_has_waiters(lock)) {
-               unsigned long flags;
-               struct rt_mutex_waiter *top;
-
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+       if (waiter) {
+               /*
+                * If waiter is not the highest priority waiter of
+                * @lock, give up.
+                */
+               if (waiter != rt_mutex_top_waiter(lock))
+                       return 0;
 
-               /* remove the queued waiter. */
-               if (waiter) {
-                       rt_mutex_dequeue(lock, waiter);
-                       task->pi_blocked_on = NULL;
-               }
+               /*
+                * We can acquire the lock. Remove the waiter from the
+                * lock waiters list.
+                */
+               rt_mutex_dequeue(lock, waiter);
 
+       } else {
                /*
-                * We have to enqueue the top waiter(if it exists) into
-                * task->pi_waiters list.
+                * If the lock already has waiters, we check whether @task is
+                * eligible to take over the lock.
+                *
+                * If there are no other waiters, @task can acquire
+                * the lock.  @task->pi_blocked_on is NULL, so it does
+                * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
-                       top = rt_mutex_top_waiter(lock);
-                       rt_mutex_enqueue_pi(task, top);
+                       /*
+                        * If @task->prio is greater than or equal to
+                        * the top waiter priority (kernel view),
+                        * @task lost.
+                        */
+                       if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+                               return 0;
+
+                       /*
+                        * The current top waiter stays enqueued. We
+                        * don't have to change anything in the lock
+                        * waiters order.
+                        */
+               } else {
+                       /*
+                        * No waiters. Take the lock without the
+                        * pi_lock dance. @task->pi_blocked_on is NULL
+                        * and we have no waiters to enqueue in @task's
+                        * pi waiters list.
+                        */
+                       goto takeit;
                }
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }
 
+       /*
+        * Clear @task->pi_blocked_on. Requires protection by
+        * @task->pi_lock. Redundant operation for the @waiter == NULL
+        * case, but conditionals are more expensive than a redundant
+        * store.
+        */
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       task->pi_blocked_on = NULL;
+       /*
+        * Finish the lock acquisition. @task is the new owner. If
+        * other waiters exist we have to insert the highest priority
+        * waiter into @task->pi_waiters list.
+        */
+       if (rt_mutex_has_waiters(lock))
+               rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);
 
+       /*
+        * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+        * are still waiters or clears it.
+        */
        rt_mutex_set_owner(lock, task);
 
        rt_mutex_deadlock_account_lock(lock, task);
@@ -532,12 +873,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
-                                  int detect_deadlock)
+                                  enum rtmutex_chainwalk chwalk)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       unsigned long flags;
+       struct rt_mutex *next_lock;
        int chain_walk = 0, res;
+       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -548,7 +890,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
-       if (detect_deadlock && owner == task)
+       if (owner == task)
                return -EDEADLK;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +911,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (!owner)
                return 0;
 
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
 
                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
-       else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+       } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                chain_walk = 1;
+       }
+
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
 
-       if (!chain_walk)
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       /*
+        * Even if full deadlock detection is on, if the owner is not
+        * blocked itself, we can avoid finding this out in the chain
+        * walk.
+        */
+       if (!chain_walk || !next_lock)
                return 0;
 
        /*
@@ -594,8 +944,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-                                        task);
+       res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
+                                        next_lock, waiter, task);
 
        raw_spin_lock(&lock->wait_lock);
 
@@ -605,7 +955,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current task's pi waiters list and
+ * wake it up.
  *
  * Called with lock->wait_lock held.
  */
@@ -626,10 +977,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         */
        rt_mutex_dequeue_pi(current, waiter);
 
-       rt_mutex_set_owner(lock, NULL);
+       /*
+        * As we are waking up the top waiter, and the waiter stays
+        * queued on the lock until it gets the lock, this lock
+        * obviously has waiters. Just set the bit here and this has
+        * the added benefit of forcing all new tasks into the
+        * slow path making sure no task of lower priority than
+        * the top waiter can steal this lock.
+        */
+       lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
+       /*
+        * It's safe to dereference waiter as it cannot go away as
+        * long as we hold lock->wait_lock. The waiter task needs to
+        * acquire it in order to dequeue the waiter.
+        */
        wake_up_process(waiter->task);
 }
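
To see what the RT_MUTEX_HAS_WAITERS store buys here, consider the fastpath a
newly arriving task would otherwise take. In the same user-space model as the
earlier sketch (names illustrative, not the kernel's):

/* Fastpath trylock, modeled on rt_mutex_cmpxchg(lock, NULL, current). */
static bool fastpath_trylock(unsigned long me)
{
        unsigned long expected = 0UL;   /* NULL owner, waiters bit clear */

        return atomic_compare_exchange_strong(&owner, &expected, me);
}

Had wakeup_next_waiter() set lock->owner to NULL instead, this cmpxchg could
succeed between the wakeup and the woken waiter's call to
try_to_take_rt_mutex(), letting a lower priority task steal the lock. With
the owner word left at RT_MUTEX_HAS_WAITERS the compare fails and the
newcomer drops into the slow path, where it serializes on lock->wait_lock.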
 
@@ -642,40 +1006,42 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
 {
-       int first = (waiter == rt_mutex_top_waiter(lock));
+       bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
+       struct rt_mutex *next_lock;
        unsigned long flags;
-       int chain_walk = 0;
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-       if (!owner)
+       /*
+        * Only update priority if the waiter was the highest priority
+        * waiter of the lock and there is an owner to update.
+        */
+       if (!owner || !is_top_waiter)
                return;
 
-       if (first) {
-
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
-               rt_mutex_dequeue_pi(owner, waiter);
+       rt_mutex_dequeue_pi(owner, waiter);
 
-               if (rt_mutex_has_waiters(lock)) {
-                       struct rt_mutex_waiter *next;
+       if (rt_mutex_has_waiters(lock))
+               rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
 
-                       next = rt_mutex_top_waiter(lock);
-                       rt_mutex_enqueue_pi(owner, next);
-               }
-               __rt_mutex_adjust_prio(owner);
+       __rt_mutex_adjust_prio(owner);
 
-               if (owner->pi_blocked_on)
-                       chain_walk = 1;
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
 
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 
-       if (!chain_walk)
+       /*
+        * Don't walk the chain, if the owner task is not blocked
+        * itself.
+        */
+       if (!next_lock)
                return;
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +1049,8 @@ static void remove_waiter(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+       rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+                                  next_lock, NULL, current);
 
        raw_spin_lock(&lock->wait_lock);
 }
@@ -696,6 +1063,7 @@ static void remove_waiter(struct rt_mutex *lock,
 void rt_mutex_adjust_pi(struct task_struct *task)
 {
        struct rt_mutex_waiter *waiter;
+       struct rt_mutex *next_lock;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +1074,14 @@ void rt_mutex_adjust_pi(struct task_struct *task)
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
-
+       next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
-       rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+                                  next_lock, NULL, task);
 }
 
 /**
@@ -763,13 +1133,33 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                    struct rt_mutex_waiter *w)
+{
+       /*
+        * If the result is not -EDEADLOCK or the caller requested
+        * deadlock detection, nothing to do here.
+        */
+       if (res != -EDEADLOCK || detect_deadlock)
+               return;
+
+       /*
+        * Yell loudly and stop the task right here.
+        */
+       rt_mutex_print_deadlock(w);
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+}
+
 /*
  * Slow path lock function:
  */
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
-                 int detect_deadlock)
+                 enum rtmutex_chainwalk chwalk)
 {
        struct rt_mutex_waiter waiter;
        int ret = 0;
@@ -795,15 +1185,17 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                        timeout->task = NULL;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+       ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(ret))
+       if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
+               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+       }
 
        /*
         * try_to_take_rt_mutex() sets the waiter bit
@@ -825,22 +1217,31 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 /*
  * Slow path try-lock function:
  */
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
-       int ret = 0;
+       int ret;
 
+       /*
+        * If the lock already has an owner, we fail to get the lock.
+        * This can be done without taking the @lock->wait_lock as
+        * it is only being read, and this is a trylock anyway.
+        */
+       if (rt_mutex_owner(lock))
+               return 0;
+
+       /*
+        * The mutex has currently no owner. Lock the wait lock and
+        * try to acquire the lock.
+        */
        raw_spin_lock(&lock->wait_lock);
 
-       if (likely(rt_mutex_owner(lock) != current)) {
+       ret = try_to_take_rt_mutex(lock, current, NULL);
 
-               ret = try_to_take_rt_mutex(lock, current, NULL);
-               /*
-                * try_to_take_rt_mutex() sets the lock waiters
-                * bit unconditionally. Clean this up.
-                */
-               fixup_rt_mutex_waiters(lock);
-       }
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
 
        raw_spin_unlock(&lock->wait_lock);
 
@@ -859,12 +1260,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
        rt_mutex_deadlock_account_unlock(current);
 
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               raw_spin_unlock(&lock->wait_lock);
-               return;
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock! */
+               if (unlock_rt_mutex_safe(lock) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock(&lock->wait_lock);
        }
 
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        */
        wakeup_next_waiter(lock);
 
        raw_spin_unlock(&lock->wait_lock);
@@ -881,30 +1319,31 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
-                 int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
-                               int detect_deadlock))
+                               enum rtmutex_chainwalk chwalk))
 {
-       if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
-               return slowfn(lock, state, NULL, detect_deadlock);
+               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-                       struct hrtimer_sleeper *timeout, int detect_deadlock,
+                       struct hrtimer_sleeper *timeout,
+                       enum rtmutex_chainwalk chwalk,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
-                                     int detect_deadlock))
+                                     enum rtmutex_chainwalk chwalk))
 {
-       if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+           likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
-               return slowfn(lock, state, timeout, detect_deadlock);
+               return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
@@ -937,54 +1376,61 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
        might_sleep();
 
-       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
  *
- * @lock:              the rt_mutex to be locked
- * @detect_deadlock:   deadlock detection on/off
+ * @lock:              the rt_mutex to be locked
  *
  * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
+ *  0          on success
+ * -EINTR      when interrupted by a signal
  */
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
-                                                int detect_deadlock)
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
        might_sleep();
 
-       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
-                                detect_deadlock, rt_mutex_slowlock);
+       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
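
A hypothetical caller, just to show the one extra failure mode versus
rt_mutex_lock(); foo and its embedded lock are made up for illustration:

        int ret = rt_mutex_lock_interruptible(&foo->lock);

        if (ret == -EINTR)
                return ret;     /* signal arrived while blocked, lock not held */
        /* ... critical section ... */
        rt_mutex_unlock(&foo->lock);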
 
+/*
+ * Futex variant with full deadlock detection.
+ */
+int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+                             struct hrtimer_sleeper *timeout)
+{
+       might_sleep();
+
+       return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+                                      RT_MUTEX_FULL_CHAINWALK,
+                                      rt_mutex_slowlock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *                     the timeout structure is provided
  *                     by the caller
  *
- * @lock:              the rt_mutex to be locked
+ * @lock:              the rt_mutex to be locked
  * @timeout:           timeout structure or NULL (no timeout)
- * @detect_deadlock:   deadlock detection on/off
  *
  * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
+ *  0          on success
+ * -EINTR      when interrupted by a signal
  * -ETIMEDOUT  when the timeout expired
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
  */
 int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
-                   int detect_deadlock)
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
        might_sleep();
 
        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-                                      detect_deadlock, rt_mutex_slowlock);
+                                      RT_MUTEX_MIN_CHAINWALK,
+                                      rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
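
A caller sketch modeled on the PI-futex setup, assuming the hrtimer sleeper
helpers of this kernel generation; the 20ms budget and foo are invented for
illustration. rt_mutex_slowlock() starts the timer with HRTIMER_MODE_ABS, so
the expiry must be absolute:

        struct hrtimer_sleeper to;
        int ret;

        hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        hrtimer_init_sleeper(&to, current);
        hrtimer_set_expires(&to.timer, ktime_add(ktime_get(), ms_to_ktime(20)));

        ret = rt_mutex_timed_lock(&foo->lock, &to);
        /* ret: 0 lock held, -ETIMEDOUT budget expired, -EINTR signal */
        destroy_hrtimer_on_stack(&to.timer);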
 
@@ -1090,7 +1536,6 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
  * @lock:              the rt_mutex to take
  * @waiter:            the pre-initialized rt_mutex_waiter
  * @task:              the task to prepare
- * @detect_deadlock:   perform deadlock detection (1) or not (0)
  *
  * Returns:
  *  0 - task blocked on lock
@@ -1101,7 +1546,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
-                             struct task_struct *task, int detect_deadlock)
+                             struct task_struct *task)
 {
        int ret;
 
@@ -1112,7 +1557,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                return 1;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task,
+                                     RT_MUTEX_FULL_CHAINWALK);
 
        if (ret && !rt_mutex_owner(lock)) {
                /*
@@ -1158,22 +1605,20 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
  * @lock:              the rt_mutex we were woken on
  * @to:                        the timeout, null if none. hrtimer should already have
- *                     been started.
+ *                     been started.
  * @waiter:            the pre-initialized rt_mutex_waiter
- * @detect_deadlock:   perform deadlock detection (1) or not (0)
  *
 * Complete the lock acquisition started on our behalf by another thread.
  *
  * Returns:
  *  0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ * <0 - error, one of -EINTR or -ETIMEDOUT
  *
  * Special API call for PI-futex requeue support
  */
 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
-                              struct rt_mutex_waiter *waiter,
-                              int detect_deadlock)
+                              struct rt_mutex_waiter *waiter)
 {
        int ret;