locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
Author:     Christian Borntraeger <borntraeger@de.ibm.com>
AuthorDate: Wed, 2 Nov 2016 09:08:32 +0000 (05:08 -0400)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 22 Nov 2016 11:48:06 +0000 (12:48 +0100)
This implements the s390 version of vcpu_is_preempted(cpu) by
reworking the existing smp_vcpu_scheduled() function into
arch_vcpu_is_preempted().

We can then also get rid of the local cpu_is_preempted()
function by moving the CIF_ENABLED_WAIT test into
arch_vcpu_is_preempted().
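
For context, generic locking code consumes this hook in its spin-wait
paths. A minimal sketch of that consumer pattern follows; the helper
owner_holds_lock() is hypothetical and only stands in for the real
owner check:

	#include <linux/sched.h>	/* vcpu_is_preempted(), need_resched() */

	/*
	 * Spin-on-owner loop that stops burning cycles once the lock
	 * holder's vCPU is no longer running on a physical CPU.
	 */
	static bool spin_on_owner(struct mutex *lock, int owner_cpu)
	{
		while (owner_holds_lock(lock)) {	/* hypothetical helper */
			if (need_resched() || vcpu_is_preempted(owner_cpu))
				return false;		/* stop spinning, block */
			cpu_relax();
		}
		return true;
	}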

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: linuxppc-dev@lists.ozlabs.org
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: pbonzini@redhat.com
Cc: rkrcmar@redhat.com
Cc: virtualization@lists.linux-foundation.org
Cc: will.deacon@arm.com
Cc: xen-devel-request@lists.xenproject.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1478077718-37424-6-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/s390/include/asm/spinlock.h
arch/s390/kernel/smp.c
arch/s390/lib/spinlock.c

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7e9e09f600fa5932849948de3bc4f66ea871cba2..7ecd8902a5c36bc96dfe52083e067365a0d3a71d 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
        return __sync_bool_compare_and_swap(lock, old, new);
 }
 
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
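
The #define above is how generic code picks up the s390 override: the
common scheduler header only supplies a fallback when no architecture
has claimed the name. A sketch of the generic side, paraphrased from
the companion sched/core patch in this series:

	/* Fallback: architectures that do not define vcpu_is_preempted
	 * (unlike s390 above) conservatively report "never preempted". */
	#ifndef vcpu_is_preempted
	#define vcpu_is_preempted(cpu)	false
	#endif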
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe1c5ea91c05b5d43a71e5de03894024287..b988ed1d75add1398a7c8e50b8ae5668cc9e96b6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
        return -1;
 }
 
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
 {
-       return pcpu_running(pcpu_devices + cpu);
+       if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+               return false;
+       if (pcpu_running(pcpu_devices + cpu))
+               return false;
+       return true;
 }
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
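
Equivalently, the new predicate reads as follows (illustrative
restatement with hypothetical helper names, not part of the patch):

	/* A vCPU counts as preempted only when it is runnable but not
	 * running: neither idling in enabled wait nor backed by a
	 * running host CPU. */
	bool preempted = !idle_in_enabled_wait(cpu) &&	/* CIF_ENABLED_WAIT */
			 !backed_by_running_cpu(cpu);	/* pcpu_running()  */

A vCPU in enabled wait is idle by choice, and a running vCPU already
holds a physical CPU, so only the remaining case is worth a directed
yield.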
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7d2f4eb07fd2037c605e4c7e87160455b2..e48a48ec24bc27229c7c35fc69ec25e8adb68ab7 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
        asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
-static inline int cpu_is_preempted(int cpu)
-{
-       if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-               return 0;
-       if (smp_vcpu_scheduled(cpu))
-               return 0;
-       return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
        unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
                        continue;
                }
                /* First iteration: check if the lock owner is running. */
-               if (first_diag && cpu_is_preempted(~owner)) {
+               if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
-               if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                        continue;
                }
                /* Check if the lock owner is running. */
-               if (first_diag && cpu_is_preempted(~owner)) {
+               if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
-               if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
        owner = 0;
        while (1) {
                if (count-- <= 0) {
-                       if (owner && cpu_is_preempted(~owner))
+                       if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
        owner = 0;
        while (1) {
                if (count-- <= 0) {
-                       if (owner && cpu_is_preempted(~owner))
+                       if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
        owner = 0;
        while (1) {
                if (count-- <= 0) {
-                       if (owner && cpu_is_preempted(~owner))
+                       if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
 {
        if (!cpu)
                return;
-       if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+       if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
 }
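
A note on the ~owner and ~cpu arguments used throughout spinlock.c:
s390 stores the lock holder as the bitwise complement of its CPU
number (assuming arch_spin_lockval(cpu) == ~cpu, as defined in this
tree's spinlock.h), so a held lock is always non-zero and ~owner
recovers the CPU to yield to:

	/* Sketch of the owner decoding, under the ~cpu assumption. */
	unsigned int owner = READ_ONCE(lp->lock);	/* ~cpu, or 0 if free */
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);			/* yield to the holder */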