git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
arch: Remove spin_unlock_wait() arch-specific definitions
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 29 Jun 2017 22:53:02 +0000 (15:53 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 17 Aug 2017 15:08:59 +0000 (08:08 -0700)
There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes the underlying arch-specific
arch_spin_unlock_wait() for all architectures providing them.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
21 files changed:
arch/alpha/include/asm/spinlock.h
arch/arc/include/asm/spinlock.h
arch/arm/include/asm/spinlock.h
arch/arm64/include/asm/spinlock.h
arch/blackfin/include/asm/spinlock.h
arch/hexagon/include/asm/spinlock.h
arch/ia64/include/asm/spinlock.h
arch/m32r/include/asm/spinlock.h
arch/metag/include/asm/spinlock.h
arch/mn10300/include/asm/spinlock.h
arch/parisc/include/asm/spinlock.h
arch/powerpc/include/asm/spinlock.h
arch/s390/include/asm/spinlock.h
arch/sh/include/asm/spinlock-cas.h
arch/sh/include/asm/spinlock-llsc.h
arch/sparc/include/asm/spinlock_32.h
arch/tile/include/asm/spinlock_32.h
arch/tile/include/asm/spinlock_64.h
arch/tile/lib/spinlock_32.c
arch/tile/lib/spinlock_64.c
arch/xtensa/include/asm/spinlock.h

index a40b9fc0c6c3cafbffbf61fdde57c47afb68250d..718ac0b64adf78ce770e1e1e56f82694550e24c9 100644 (file)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
         return lock.lock == 0;
index 233d5ffe6ec779eb376e5beaf5a031fce6fa97c1..a325e6a365231cca8b39890b0d2db6253d4efcaf 100644 (file)
 #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)      arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
 #ifdef CONFIG_ARC_HAS_LLSC
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
index 4bec4544207243d477b2e326b50119cc842526fe..c030143c18c6ea9709a5b42c44112549bd4503fc 100644 (file)
@@ -52,22 +52,6 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       u16 owner = READ_ONCE(lock->tickets.owner);
-
-       for (;;) {
-               arch_spinlock_t tmp = READ_ONCE(*lock);
-
-               if (tmp.tickets.owner == tmp.tickets.next ||
-                   tmp.tickets.owner != owner)
-                       break;
-
-               wfe();
-       }
-       smp_acquire__after_ctrl_dep();
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
index cae331d553f81b8ab649aa4e65c6da35038c5f97..f445bd7f2b9f2f573d99b0b739decb443c2b3666 100644 (file)
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       unsigned int tmp;
-       arch_spinlock_t lockval;
-       u32 owner;
-
-       /*
-        * Ensure prior spin_lock operations to other locks have completed
-        * on this CPU before we test whether "lock" is locked.
-        */
-       smp_mb();
-       owner = READ_ONCE(lock->owner) << 16;
-
-       asm volatile(
-"      sevl\n"
-"1:    wfe\n"
-"2:    ldaxr   %w0, %2\n"
-       /* Is the lock free? */
-"      eor     %w1, %w0, %w0, ror #16\n"
-"      cbz     %w1, 3f\n"
-       /* Lock taken -- has there been a subsequent unlock->lock transition? */
-"      eor     %w1, %w3, %w0, lsl #16\n"
-"      cbz     %w1, 1b\n"
-       /*
-        * The owner has been updated, so there was an unlock->lock
-        * transition that we missed. That means we can rely on the
-        * store-release of the unlock operation paired with the
-        * load-acquire of the lock operation to publish any of our
-        * previous stores to the new lock owner and therefore don't
-        * need to bother with the writeback below.
-        */
-"      b       4f\n"
-"3:\n"
-       /*
-        * Serialise against any concurrent lockers by writing back the
-        * unlocked lock value
-        */
-       ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-"      stxr    %w1, %w0, %2\n"
-       __nops(2),
-       /* LSE atomics */
-"      mov     %w1, %w0\n"
-"      cas     %w0, %w0, %2\n"
-"      eor     %w1, %w1, %w0\n")
-       /* Somebody else wrote to the lock, GOTO 10 and reload the value */
-"      cbnz    %w1, 2b\n"
-"4:"
-       : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-       : "r" (owner)
-       : "memory");
-}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       smp_mb(); /* See arch_spin_unlock_wait */
+       /*
+        * Ensure prior spin_lock operations to other locks have completed
+        * on this CPU before we test whether "lock" is locked.
+        */
+       smp_mb(); /* ^^^ */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
index c58f4a83ed6f343f6ef45962884352d6368f2dc8..f6431439d15d48339ab4b161af70447baaa00e6a 100644 (file)
@@ -48,11 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        __raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
        return __raw_uncached_fetch_asm(&rw->lock) > 0;
index a1c55788c5d6591eac4786b2dc0fcdd3164eb65c..53a8d588588787cba2b22202c2ee675fba83e6df 100644 (file)
@@ -179,11 +179,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
index ca9e76149a4aa16974047820829e6c017afe77dd..df2c121164b8d03e3a9c355e9199d62482fd2c93 100644 (file)
@@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
        ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       int     *p = (int *)&lock->lock, ticket;
-
-       ia64_invala();
-
-       for (;;) {
-               asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
-               if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
-                       return;
-               cpu_relax();
-       }
-
-       smp_acquire__after_ctrl_dep();
-}
-
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
        long tmp = ACCESS_ONCE(lock->lock);
@@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
        arch_spin_lock(lock);
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       __ticket_spin_unlock_wait(lock);
-}
-
 #define arch_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
 #define arch_write_can_lock(rw)        (*(volatile int *)(rw) == 0)
 
index 323c7fc953cdefa3e0b89efaafbe8aace9ed41c0..a56825592b90a070b48a41145fb84332fd6f50fe 100644 (file)
 #define arch_spin_is_locked(x)         (*(volatile int *)(&(x)->slock) <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->slock, VAL > 0);
-}
-
 /**
  * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
index c0c7a22be1aeff1769308825d9d739837c8baaaa..ddf7fe5708a6ba079a3f50496eeec34f8de8fbc2 100644 (file)
  * locked.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 #define        arch_read_lock_flags(lock, flags) arch_read_lock(lock)
index 9c7b8f7942d8e9aadf3008875774b55a37959ae9..fe413b41df6cbbc50551f734d793e3b6c0c8d3d1 100644 (file)
 
 #define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        asm volatile(
index e32936cd7f1017a418bf1f086cfc819640616260..55bfe4affca30215db7bd23721795c89992b6d86 100644 (file)
@@ -14,13 +14,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
 
 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
-{
-       volatile unsigned int *a = __ldcw_align(x);
-
-       smp_cond_load_acquire(a, VAL);
-}
-
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
 {
index 8c1b913de6d72ccb2a54759570fc8a511a4ea4e9..d256e448ea49c9911287318ae34d992885cb23db 100644 (file)
@@ -170,39 +170,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        lock->slock = 0;
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       arch_spinlock_t lock_val;
-
-       smp_mb();
-
-       /*
-        * Atomically load and store back the lock value (unchanged). This
-        * ensures that our observation of the lock value is ordered with
-        * respect to other lock operations.
-        */
-       __asm__ __volatile__(
-"1:    " PPC_LWARX(%0, 0, %2, 0) "\n"
-"      stwcx. %0, 0, %2\n"
-"      bne- 1b\n"
-       : "=&r" (lock_val), "+m" (*lock)
-       : "r" (lock)
-       : "cr0", "xer");
-
-       if (arch_spin_value_unlocked(lock_val))
-               goto out;
-
-       while (lock->slock) {
-               HMT_low();
-               if (SHARED_PROCESSOR)
-                       __spin_yield(lock);
-       }
-       HMT_medium();
-
-out:
-       smp_mb();
-}
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
index f7838ecd83c6691d9ffa4e4d50af32292b4b5a3e..217ee5210c32ea0845d98deff7436f6d2e84b215 100644 (file)
@@ -98,13 +98,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
                : "cc", "memory");
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       while (arch_spin_is_locked(lock))
-               arch_spin_relax(lock);
-       smp_acquire__after_ctrl_dep();
-}
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
index c46e8cc7b515d9c7ba8654b1e45f681fc2cdd0cf..5ed7dbbd94ff5bce146bf9581297d9e1358373e7 100644 (file)
@@ -29,11 +29,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
 #define arch_spin_is_locked(x)         ((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        while (!__sl_cas(&lock->lock, 1, 0));
index cec78143fa8368bf610347a100d8e598e0176288..f77263aae7607ff649ff167446c44879f2133345 100644 (file)
 #define arch_spin_is_locked(x)         ((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
index 8011e79f59c96f3658e430765e6aa56caa400098..67345b2dc408b94970f80a40d4aba18d2c08c7bc 100644 (file)
 
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        __asm__ __volatile__(
index b14b1ba5bf9c328959bea3a14643fbb87e4ae7b1..cba8ba9b8da6ab218cee2d4ca3cecaf6a2488d97 100644 (file)
@@ -64,8 +64,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        lock->current_ticket = old_ticket + TICKET_QUANTUM;
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
index b9718fb4e74a7703a0fbf3c37691b9c00a84aced..9a2c2d605752e6649c97286389013b1cce5f3e2f 100644 (file)
@@ -58,8 +58,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
 
 /* Grab the "next" ticket number and bump it atomically.
index 076c6cc431136fc8475b0fa0b768bb7a6083dc66..db9333f2447c397013854ef0f1f7abe3e675089b 100644 (file)
@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       u32 iterations = 0;
-       int curr = READ_ONCE(lock->current_ticket);
-       int next = READ_ONCE(lock->next_ticket);
-
-       /* Return immediately if unlocked. */
-       if (next == curr)
-               return;
-
-       /* Wait until the current locker has released the lock. */
-       do {
-               delay_backoff(iterations++);
-       } while (READ_ONCE(lock->current_ticket) == curr);
-
-       /*
-        * The TILE architecture doesn't do read speculation; therefore
-        * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-        */
-       barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
-
 /*
  * The low byte is always reserved to be the marker for a "tns" operation
  * since the low bit is set to "1" by a tns.  The next seven bits are
index a4b5b2cbce9337bdc775a92c087b3c5cf3876965..de414c22892fd5bca9caeaf5cdf8e7de8ae6ffc8 100644 (file)
@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       u32 iterations = 0;
-       u32 val = READ_ONCE(lock->lock);
-       u32 curr = arch_spin_current(val);
-
-       /* Return immediately if unlocked. */
-       if (arch_spin_next(val) == curr)
-               return;
-
-       /* Wait until the current locker has released the lock. */
-       do {
-               delay_backoff(iterations++);
-       } while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-
-       /*
-        * The TILE architecture doesn't do read speculation; therefore
-        * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-        */
-       barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
 
 /*
  * If the read lock fails due to a writer, we retry periodically
index a36221cf63637fc2e066850e937bc138698af973..3bb49681ee242803e13c1ef1eae81011a33a98b4 100644 (file)
 
 #define arch_spin_is_locked(x) ((x)->slock != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)