locking/pvqspinlock: Kick the PV CPU unconditionally when _Q_SLOW_VAL
author Waiman Long <Waiman.Long@hpe.com>
Fri, 11 Sep 2015 18:37:34 +0000 (14:37 -0400)
committer Ingo Molnar <mingo@kernel.org>
Fri, 18 Sep 2015 07:27:29 +0000 (09:27 +0200)
If _Q_SLOW_VAL has been set, the vCPU state must have been vcpu_hashed.
The extra check at the end of __pv_queued_spin_unlock() is unnecessary
and can be removed.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1441996658-62854-3-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/qspinlock_paravirt.h

index c8e6e9a596f513baa8a85af3cb0f2b9ff9116a91..f0450ff4829b6c1308d4768b2ae3a7c575b1cf51 100644 (file)
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                }
 
                if (!lp) { /* ONCE */
-                       WRITE_ONCE(pn->state, vcpu_hashed);
                        lp = pv_hash(lock, pn);
 
                        /*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
-                        *   [S] pn->state
                         *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                           RMB
                         * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
-                        *                                [L] pn->state
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
-       if (READ_ONCE(node->state) == vcpu_hashed)
-               pv_kick(node->cpu);
+       pv_kick(node->cpu);
 }
 /*
  * Include the architecture specific callee-save thunk of the
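For context, the unlock slow path that results from this change looks roughly as
follows. This is a simplified sketch of __pv_queued_spin_unlock() after the
patch, assuming the helper definitions (struct __qspinlock, pv_unhash(),
pv_kick(), smp_rmb()) from kernel/locking/qspinlock_paravirt.h; it is
illustrative rather than the verbatim kernel code:

    __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
    {
            struct __qspinlock *l = (void *)lock;
            struct pv_node *node;

            /*
             * Fast path: the lock word still holds _Q_LOCKED_VAL, so no
             * waiter went to sleep; a single cmpxchg releases the lock.
             */
            if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
                    return;

            /*
             * The cmpxchg failed, so the lock word holds _Q_SLOW_VAL. Its
             * only writer is pv_wait_head(), which hashes the node before
             * storing _Q_SLOW_VAL, so the waiter must already be in the
             * vcpu_hashed state; the removed READ_ONCE(node->state) check
             * could never observe anything else. The smp_rmb() orders the
             * hash lookup after the observation of _Q_SLOW_VAL, matching
             * the barrier described in the first hunk above.
             */
            smp_rmb();
            node = pv_unhash(lock);

            /* Release the lock, then kick the (likely halted) waiter. */
            smp_store_release(&l->locked, 0);
            pv_kick(node->cpu);
    }

Since pv_kick() on a vCPU that is still running is harmless (as the comment in
the second hunk notes), dropping the state check trades an occasional spurious
kick for one fewer load and branch on every slow-path unlock.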