locking: Remove spin_lock_flags() etc
Author:     Arnd Bergmann <arnd@arndb.de>
AuthorDate: Fri, 22 Oct 2021 11:59:38 +0000 (13:59 +0200)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Sat, 30 Oct 2021 14:37:28 +0000 (16:37 +0200)
parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.
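
The idea, in a minimal conceptual sketch (loosely mirroring the powerpc
version removed below, not any one architecture verbatim):

	static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						unsigned long flags)
	{
		while (!arch_spin_trylock(lock)) {
			/* re-enable interrupts, per the caller's saved
			 * flags, while the lock is contended ... */
			local_irq_restore(flags);
			while (arch_spin_is_locked(lock))
				cpu_relax();
			/* ... and disable them again before retrying */
			local_irq_disable();
		}
	}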

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.
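
Concretely, the gating looks roughly like this (a paraphrase of
kernel/locking/spinlock.c and include/linux/spinlock_api_smp.h, not a
verbatim quote; see the hunks below):

	/* spinlock.c only builds the out-of-line *_irqsave functions
	 * (which wrap __raw_spin_lock_irqsave()) under this guard: */
	#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

	/* ... and __raw_spin_lock_irqsave() itself only takes the
	 * flags path when lockdep is disabled: */
	#ifdef CONFIG_LOCKDEP
		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	#else
		do_raw_spin_lock_flags(lock, &flags);
	#endif

	#endif

Since CONFIG_DEBUG_LOCK_ALLOC selects CONFIG_LOCKDEP, the flags path
could only ever be reached with CONFIG_GENERIC_LOCKBREAK disabled.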

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was
already impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but
it seems safer to leave it untouched.
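
The caller-visible API is unaffected either way: spin_lock_irqsave()
keeps its usual semantics, the saved flags are simply never handed down
to the architecture code anymore (my_lock below is a placeholder):

	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* IRQs stay disabled */
	/* ... critical section ... */
	spin_unlock_irqrestore(&my_lock, flags);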

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Helge Deller <deller@gmx.de> # parisc
Link: https://lore.kernel.org/r/20211022120058.1031690-1-arnd@kernel.org
12 files changed:
arch/ia64/include/asm/spinlock.h
arch/openrisc/include/asm/spinlock.h
arch/parisc/include/asm/spinlock.h
arch/powerpc/include/asm/simple_spinlock.h
arch/s390/include/asm/spinlock.h
include/linux/lockdep.h
include/linux/rwlock.h
include/linux/rwlock_api_smp.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_up.h
kernel/locking/spinlock.c

diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 864775970c50f9757429a871a76a04eee5bdb952..0e5c1ad3239c48da82cdfa77925d0065d3206365 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
        __ticket_spin_unlock(lock);
 }
 
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
-                                                 unsigned long flags)
-{
-       arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags   arch_spin_lock_flags
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
 {
+       unsigned long flags = 0;
+
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1,%2\n"
                "br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                : "p6", "p7", "r2", "memory");
 }
 
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
 #else /* !ASM_SUPPORTED */
 
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
 #define arch_read_lock(rw)                                                             \
 do {                                                                                   \
        arch_rwlock_t *__read_lock_ptr = (rw);                                          \
@@ -186,8 +176,10 @@ do {                                                               \
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
 {
+       unsigned long flags = 0;
+
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1, %2\n"
                "mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
 #define arch_write_trylock(rw)                                                 \
 ({                                                                             \
        register long result;                                                   \
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index a8940bdfcb7efe862f63f081e65a9828d7a09672..264944a71535ffcd54f8b9283b91ef86c3166594 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fa5ee8a45dbd69dc1c8ffd4667243efa25126950..a6e5d66a76562aaa8e89bab4b55e4aa39d13ad43 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
                        continue;
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-                                       unsigned long flags)
-{
-       volatile unsigned int *a;
-
-       a = __ldcw_align(x);
-       while (__ldcw(a) == 0)
-               while (*a == 0)
-                       if (flags & PSW_SM_I) {
-                               local_irq_enable();
-                               local_irq_disable();
-                       }
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
        volatile unsigned int *a;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791a2ba57ca16906416f6899e2ca1ae3c50d..7ae6aeef8464eebc9583d8963e009441a1bc955e 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        }
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-       unsigned long flags_dis;
-
-       while (1) {
-               if (likely(__arch_spin_trylock(lock) == 0))
-                       break;
-               local_save_flags(flags_dis);
-               local_irq_restore(flags);
-               do {
-                       HMT_low();
-                       if (is_shared_processor())
-                               splpar_spin_yield(lock);
-               } while (unlikely(lock->slock != 0));
-               HMT_medium();
-               local_irq_restore(flags_dis);
-       }
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        __asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ef59588a3042d1d415b783b1f2c957166fa2c760..888a2f1c9ee3b6943e974e00c38783fb787a8c7f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
                arch_spin_lock_wait(lp);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-                                       unsigned long flags)
-{
-       if (!arch_spin_trylock_once(lp))
-               arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags   arch_spin_lock_flags
-
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
        if (!arch_spin_trylock_once(lp))
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9fe165beb0f9e2a750e61aa0af3fd64bd6e5e67c..467b94257105e815bec99d4404a5c596f3857d15 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -481,23 +481,6 @@ do {                                                               \
 
 #endif /* CONFIG_LOCK_STAT */
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-       LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-       lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
 #ifdef CONFIG_PROVE_LOCKING
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 7ce9a51ae5c046dbb536fbf36d1b50f9685eab67..2c0ad417ce3c034b40f04157a062269c130a0f36 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,31 +30,16 @@ do {                                                                \
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
  extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags)     arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags)    arch_write_lock(lock)
-#endif
-
 # define do_raw_read_lock(rwlock)      do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
-               do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)   arch_read_trylock(&(rwlock)->raw_lock)
 # define do_raw_read_unlock(rwlock)    do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 # define do_raw_write_lock(rwlock)     do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
-               do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)  arch_write_trylock(&(rwlock)->raw_lock)
 # define do_raw_write_unlock(rwlock)   do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11beb4f527cea7203cda03495850ed37..f1db6f17c4fba4c8b96f8ddb6ded8e25235c64a8 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-       LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
-                            do_raw_read_lock_flags, &flags);
+       LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
        return flags;
 }
 
@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
-                            do_raw_write_lock_flags, &flags);
+       LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
        return flags;
 }
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 45310ea1b1d787940c26597983fab0dcfa08affc..f0447062eecda8e0e3c8fd1037e5673029b8b851 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -177,7 +177,6 @@ do {                                                                        \
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
@@ -188,18 +187,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
        mmiowb_spin_lock();
 }
 
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags)      arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
-       __acquire(lock);
-       arch_spin_lock_flags(&lock->raw_lock, *flags);
-       mmiowb_spin_lock();
-}
-
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
        int ret = arch_spin_trylock(&(lock)->raw_lock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b8e1a0b137bd261e4c8fcf3f9597cf5b1851e5d..51fa0dab68c4dc6af3c8922694f257ea9db3e2d2 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       /*
-        * On lockdep we dont want the hand-coded irq-enable of
-        * do_raw_spin_lock_flags() code, because lockdep assumes
-        * that interrupts are not re-enabled during lock-acquire:
-        */
-#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
-       do_raw_spin_lock_flags(lock, &flags);
-#endif
        return flags;
 }
 
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe3287658057098c37a67bc818c559..16521074b6f7c8576339fd516401915685800494 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_spin_is_locked(lock)      ((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)          do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)     do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)        do { barrier(); (void)(lock); } while (0)
 # define arch_spin_trylock(lock)       ({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index c5830cfa379a06bdd76833054abeb82d8cfbd28c..b562f92893727c6885b1b75081783eb48770875f 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -378,8 +378,7 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-       LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
-                               do_raw_spin_lock_flags, &flags);
+       LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
        return flags;
 }
 EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);