git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
locking/qrwlock: Prevent slowpath writers getting held up by fastpath
authorWill Deacon <will.deacon@arm.com>
Fri, 5 Jan 2018 01:48:37 +0000 (18:48 -0700)
committerKleber Sacilotto de Souza <kleber.souza@canonical.com>
Tue, 6 Feb 2018 11:11:58 +0000 (12:11 +0100)
BugLink: https://bugs.launchpad.net/bugs/1732238
When a prospective writer takes the qrwlock locking slowpath due to the
lock being held, it attempts to cmpxchg the wmode field from 0 to
_QW_WAITING so that concurrent lockers also take the slowpath and queue
on the spinlock accordingly, allowing the lockers to drain.

Unfortunately, this isn't fair, because a fastpath writer that comes in
after the lock is made available but before the _QW_WAITING flag is set
can effectively jump the queue. If there is a steady stream of prospective
writers, then the waiter will be held off indefinitely.

This patch restores fairness by separating _QW_WAITING and _QW_LOCKED
into two distinct fields: _QW_LOCKED continues to occupy the bottom byte
of the lockword so that it can be cleared unconditionally when unlocking,
but _QW_WAITING now occupies what used to be the bottom bit of the reader
count. This then forces the slow-path for concurrent lockers.

Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Adam Wallis <awallis@codeaurora.org>
Tested-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1507810851-306-6-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit d133166146333e1f13fc81c0e6c43c8d99290a8a)
Signed-off-by: dann frazier <dann.frazier@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
include/asm-generic/qrwlock.h
include/asm-generic/qrwlock_types.h
kernel/locking/qrwlock.c

index 814d5b64528caa99d5ad2bce655babe5815e0705..c39a93a6d91d14ce00a636385736b9dc895bcd10 100644 (file)
 
 /*
  * Writer states & reader shift and bias.
- *
- *       | +0 | +1 | +2 | +3 |
- *   ----+----+----+----+----+
- *    LE | 78 | 56 | 34 | 12 | 0x12345678
- *   ----+----+----+----+----+
- *       | wr |      rd      |
- *       +----+----+----+----+
- *
- *   ----+----+----+----+----+
- *    BE | 12 | 34 | 56 | 78 | 0x12345678
- *   ----+----+----+----+----+
- *       |      rd      | wr |
- *       +----+----+----+----+
  */
-#define        _QW_WAITING     1               /* A writer is waiting     */
-#define        _QW_LOCKED      0xff            /* A writer holds the lock */
-#define        _QW_WMASK       0xff            /* Writer mask             */
-#define        _QR_SHIFT       8               /* Reader count shift      */
+#define        _QW_WAITING     0x100           /* A writer is waiting     */
+#define        _QW_LOCKED      0x0ff           /* A writer holds the lock */
+#define        _QW_WMASK       0x1ff           /* Writer mask             */
+#define        _QR_SHIFT       9               /* Reader count shift      */
 #define _QR_BIAS       (1U << _QR_SHIFT)
 
 /*
@@ -152,7 +139,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-       smp_store_release(&lock->wmode, 0);
+       smp_store_release(&lock->wlocked, 0);
 }
 
 /*
index 507f2dc51bba98f6f3d3d0f93f58940af9ece784..8af752acbdc0849c5fe1bb74a82f4993a736a0b6 100644 (file)
@@ -13,11 +13,11 @@ typedef struct qrwlock {
                atomic_t cnts;
                struct {
 #ifdef __LITTLE_ENDIAN
-                       u8 wmode;       /* Writer mode   */
-                       u8 rcnts[3];    /* Reader counts */
+                       u8 wlocked;     /* Locked for write? */
+                       u8 __lstate[3];
 #else
-                       u8 rcnts[3];    /* Reader counts */
-                       u8 wmode;       /* Writer mode   */
+                       u8 __lstate[3];
+                       u8 wlocked;     /* Locked for write? */
 #endif
                };
        };
index 5825e0fc1a8eec6f1cf504b3582b8773e4676afd..c7471c3fb79898aa9c0176af57f72812564d2641 100644 (file)
@@ -39,8 +39,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
                 * so spin with ACQUIRE semantics until the lock is available
                 * without waiting in the queue.
                 */
-               atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK)
-                                        != _QW_LOCKED);
+               atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
                return;
        }
        atomic_sub(_QR_BIAS, &lock->cnts);
@@ -56,7 +55,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
-       atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
+       atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 
        /*
         * Signal the next one in queue to become queue head
@@ -79,19 +78,10 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
            (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;
 
-       /*
-        * Set the waiting flag to notify readers that a writer is pending,
-        * or wait for a previous writer to go away.
-        */
-       for (;;) {
-               if (!READ_ONCE(lock->wmode) &&
-                  (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
-                       break;
-
-               cpu_relax();
-       }
+       /* Set the waiting flag to notify readers that a writer is pending */
+       atomic_add(_QW_WAITING, &lock->cnts);
 
-       /* When no more readers, set the locked flag */
+       /* When no more readers or writers, set the locked flag */
        do {
                atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
        } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,