locking/pvqspinlock: Move lock stealing count tracking code into pv_queued_spin_steal_lock()
author    Waiman Long <Waiman.Long@hpe.com>
          Thu, 10 Dec 2015 20:17:46 +0000 (15:17 -0500)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 29 Feb 2016 09:02:41 +0000 (10:02 +0100)
This patch moves the lock stealing count tracking code into
pv_queued_spin_steal_lock() instead of via a jacket function, simplifying
the code.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1449778666-13593-3-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/qspinlock_paravirt.h
kernel/locking/qspinlock_stat.h
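
As a rough illustration of the resulting pattern (not the kernel code
itself), here is a minimal standalone sketch in C11 atomics. The names
lock_word, steal_count, stat_inc() and steal_lock() are hypothetical
stand-ins for the kernel's qspinlock word, the qstat_pv_lock_stealing
counter, qstat_inc() and pv_queued_spin_steal_lock(): the trylock path
records its own success, so no separate jacket function is needed.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint  lock_word;    /* 0 = unlocked, 1 = locked */
    static atomic_ulong steal_count;  /* qstat_pv_lock_stealing analogue */

    static inline void stat_inc(atomic_ulong *ctr, bool cond)
    {
            if (cond)                 /* count only successful steals */
                    atomic_fetch_add(ctr, 1);
    }

    /* After the patch: the steal path counts its own successes, */
    /* replacing the wrapper that used to do the counting.       */
    static inline bool steal_lock(void)
    {
            unsigned int expected = 0;
            bool ret = atomic_load(&lock_word) == 0 &&
                       atomic_compare_exchange_strong(&lock_word,
                                                      &expected, 1);

            stat_inc(&steal_count, ret);
            return ret;
    }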

index 87bb235c3448054d63923d0f9549cbc7718a49ca..78f04a232d3a61fc1cb0caddfb72c2a58af22926 100644 (file)
@@ -54,6 +54,11 @@ struct pv_node {
        u8                      state;
 };
 
+/*
+ * Include queued spinlock statistics code
+ */
+#include "qspinlock_stat.h"
+
 /*
  * By replacing the regular queued_spin_trylock() with the function below,
  * it will be called once when a lock waiter enter the PV slowpath before
@@ -65,9 +70,11 @@ struct pv_node {
 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
+       int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+                  (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
 
-       return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-               (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
+       qstat_inc(qstat_pv_lock_stealing, ret);
+       return ret;
 }
 
 /*
@@ -137,11 +144,6 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 }
 #endif /* _Q_PENDING_BITS == 8 */
 
-/*
- * Include queued spinlock statistics code
- */
-#include "qspinlock_stat.h"
-
 /*
  * Lock and MCS node addresses hash table for fast lookup
  *
index 640dcecdd1df7a5ac5c8167fbd9e70e71b2e8f21..869988d4612416c5e178ec9f3a91d83cfd7b666a 100644 (file)
@@ -279,19 +279,6 @@ static inline void __pv_wait(u8 *ptr, u8 val)
 #define pv_kick(c)     __pv_kick(c)
 #define pv_wait(p, v)  __pv_wait(p, v)
 
-/*
- * PV unfair trylock count tracking function
- */
-static inline int qstat_spin_steal_lock(struct qspinlock *lock)
-{
-       int ret = pv_queued_spin_steal_lock(lock);
-
-       qstat_inc(qstat_pv_lock_stealing, ret);
-       return ret;
-}
-#undef  queued_spin_trylock
-#define queued_spin_trylock(l) qstat_spin_steal_lock(l)
-
 #else /* CONFIG_QUEUED_LOCK_STAT */
 
 static inline void qstat_inc(enum qlock_stats stat, bool cond) { }
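With CONFIG_QUEUED_LOCK_STAT disabled, qstat_inc() is the empty inline
stub shown above, so the counting call this patch adds to
pv_queued_spin_steal_lock() compiles away and the unfair trylock path
keeps its original cost.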