locking/qrwlock, arm64: Move rwlock implementation over to qrwlocks
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Fri, 5 Jan 2018 01:48:36 +0000 (18:48 -0700)
Commit:     Kleber Sacilotto de Souza <kleber.souza@canonical.com>
CommitDate: Tue, 6 Feb 2018 11:11:44 +0000 (12:11 +0100)
BugLink: https://bugs.launchpad.net/bugs/1732238
Now that the qrwlock can make use of WFE, remove our homebrewed rwlock
code in favour of the generic queued implementation.
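
As a rough illustration of why this helps (a paraphrase of
include/asm-generic/qrwlock.h and kernel/locking/qrwlock.c around this
kernel version, not the exact code), the queued reader path waits via
atomic_cond_read_acquire(), which arm64's smp_cond_load_acquire() turns
into an LDXR/WFE wait instead of a busy-poll loop:

  static inline void queued_read_lock(struct qrwlock *lock)
  {
          u32 cnts;

          /* Fast path: speculatively add one reader. */
          cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
          if (likely(!(cnts & _QW_WMASK)))
                  return;                 /* no writer held or waiting */

          /*
           * Slow path: queue on lock->wait_lock, then wait with
           * atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)),
           * which on arm64 parks the CPU in WFE between updates.
           */
          queued_read_lock_slowpath(lock);
  }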

Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Adam Wallis <awallis@codeaurora.org>
Tested-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507810851-306-5-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 087133ac90763cd339b6b67f2998f87dcc136c52)
Signed-off-by: dann frazier <dann.frazier@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/arm64/Kconfig
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/spinlock_types.h

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4c3ce156f84ea7975468f3c64a720532cb448be1..b4e12bc663ce42889e075abc1a6c8d6f14bc15e0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,7 +22,24 @@ config ARM64
        select ARCH_HAS_STRICT_MODULE_RWX
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
+       select ARCH_INLINE_READ_LOCK if !PREEMPT
+       select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+       select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+       select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+       select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+       select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+       select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+       select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+       select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+       select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+       select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+       select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+       select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
        select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_SUPPORTS_MEMORY_FAILURE
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_NUMA_BALANCING
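
The ARCH_INLINE_* selects above feed kernel/Kconfig.locks so that, on
!PREEMPT kernels, the rwlock entry points expand inline rather than
going through an out-of-line call. A condensed sketch of the generic
mechanism (paraphrased from include/linux/rwlock_api_smp.h and
kernel/locking/spinlock.c, shown for illustration only):

  #ifdef CONFIG_INLINE_READ_LOCK
  /* Inline: the API entry point collapses to the implementation. */
  #define _raw_read_lock(lock) __raw_read_lock(lock)
  #else
  /* Out of line: one real function body is emitted and exported. */
  void __lockfunc _raw_read_lock(rwlock_t *lock)
  {
          __raw_read_lock(lock);
  }
  EXPORT_SYMBOL(_raw_read_lock);
  #endif
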
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index f81c7b685fc6f3ee2dc7d15fdf31924d06e396ba..67dcb793a17c62502865331e41ff7c2ac0fa3454 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += preempt.h
+generic-y += qrwlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
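
The new generic-y entry makes Kbuild emit a one-line wrapper header at
build time, so <asm/qrwlock.h> resolves to the generic code; the
generated file is effectively just:

  /* arch/arm64/include/generated/asm/qrwlock.h (generated by Kbuild) */
  #include <asm-generic/qrwlock.h>
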
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cae331d553f81b8ab649aa4e65c6da35038c5f97..60140d5b328e9ca740df2325f70c131ab0a71bbc 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -187,169 +187,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 }
 #define arch_spin_is_contended arch_spin_is_contended
 
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-       unsigned int tmp;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       sevl\n"
-       "1:     wfe\n"
-       "2:     ldaxr   %w0, %1\n"
-       "       cbnz    %w0, 1b\n"
-       "       stxr    %w0, %w2, %1\n"
-       "       cbnz    %w0, 2b\n"
-       __nops(1),
-       /* LSE atomics */
-       "1:     mov     %w0, wzr\n"
-       "2:     casa    %w0, %w2, %1\n"
-       "       cbz     %w0, 3f\n"
-       "       ldxr    %w0, %1\n"
-       "       cbz     %w0, 2b\n"
-       "       wfe\n"
-       "       b       1b\n"
-       "3:")
-       : "=&r" (tmp), "+Q" (rw->lock)
-       : "r" (0x80000000)
-       : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-       unsigned int tmp;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "1:     ldaxr   %w0, %1\n"
-       "       cbnz    %w0, 2f\n"
-       "       stxr    %w0, %w2, %1\n"
-       "       cbnz    %w0, 1b\n"
-       "2:",
-       /* LSE atomics */
-       "       mov     %w0, wzr\n"
-       "       casa    %w0, %w2, %1\n"
-       __nops(2))
-       : "=&r" (tmp), "+Q" (rw->lock)
-       : "r" (0x80000000)
-       : "memory");
-
-       return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       "       stlr    wzr, %0",
-       "       swpl    wzr, wzr, %0")
-       : "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)         ((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-       unsigned int tmp, tmp2;
-
-       asm volatile(
-       "       sevl\n"
-       ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "1:     wfe\n"
-       "2:     ldaxr   %w0, %2\n"
-       "       add     %w0, %w0, #1\n"
-       "       tbnz    %w0, #31, 1b\n"
-       "       stxr    %w1, %w0, %2\n"
-       "       cbnz    %w1, 2b\n"
-       __nops(1),
-       /* LSE atomics */
-       "1:     wfe\n"
-       "2:     ldxr    %w0, %2\n"
-       "       adds    %w1, %w0, #1\n"
-       "       tbnz    %w1, #31, 1b\n"
-       "       casa    %w0, %w1, %2\n"
-       "       sbc     %w0, %w1, %w0\n"
-       "       cbnz    %w0, 2b")
-       : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-       :
-       : "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-       unsigned int tmp, tmp2;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "1:     ldxr    %w0, %2\n"
-       "       sub     %w0, %w0, #1\n"
-       "       stlxr   %w1, %w0, %2\n"
-       "       cbnz    %w1, 1b",
-       /* LSE atomics */
-       "       movn    %w0, #0\n"
-       "       staddl  %w0, %2\n"
-       __nops(2))
-       : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-       :
-       : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-       unsigned int tmp, tmp2;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       mov     %w1, #1\n"
-       "1:     ldaxr   %w0, %2\n"
-       "       add     %w0, %w0, #1\n"
-       "       tbnz    %w0, #31, 2f\n"
-       "       stxr    %w1, %w0, %2\n"
-       "       cbnz    %w1, 1b\n"
-       "2:",
-       /* LSE atomics */
-       "       ldr     %w0, %2\n"
-       "       adds    %w1, %w0, #1\n"
-       "       tbnz    %w1, #31, 1f\n"
-       "       casa    %w0, %w1, %2\n"
-       "       sbc     %w1, %w1, %w0\n"
-       __nops(1)
-       "1:")
-       : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-       :
-       : "cc", "memory");
-
-       return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)          ((x)->lock < 0x80000000)
+#include <asm/qrwlock.h>
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
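
With the hand-rolled routines gone, <asm/qrwlock.h> supplies the arch_*
entry points by mapping them straight onto the queued implementation
(paraphrased from include/asm-generic/qrwlock.h of this era):

  #define arch_read_lock(l)       queued_read_lock(l)
  #define arch_read_trylock(l)    queued_read_trylock(l)
  #define arch_read_unlock(l)     queued_read_unlock(l)
  #define arch_read_can_lock(l)   queued_read_can_lock(l)
  #define arch_write_lock(l)      queued_write_lock(l)
  #define arch_write_trylock(l)   queued_write_trylock(l)
  #define arch_write_unlock(l)    queued_write_unlock(l)
  #define arch_write_can_lock(l)  queued_write_can_lock(l)
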
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a35e3fe98c094b3b64a15cf02bfd62c7c8..6b856012c51b9f80e658a10e22609e6d4debc0ed 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@ typedef struct {
 
 #define __ARCH_SPIN_LOCK_UNLOCKED      { 0 , 0 }
 
-typedef struct {
-       volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED                { 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif
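
For reference, the type pulled in above pairs an atomic reader/writer
count with a spinlock that serialises queued waiters (paraphrased from
include/asm-generic/qrwlock_types.h of this era):

  typedef struct qrwlock {
          atomic_t        cnts;           /* reader count + writer state */
          arch_spinlock_t wait_lock;      /* queue of waiting lockers */
  } arch_rwlock_t;

  #define __ARCH_RW_LOCK_UNLOCKED {                \
          .cnts = ATOMIC_INIT(0),                  \
          .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
  }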