ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Mon, 12 Aug 2013 17:04:05 +0000 (18:04 +0100)
Commit:     Russell King <rmk+kernel@arm.linux.org.uk>
CommitDate: Tue, 13 Aug 2013 19:22:44 +0000 (20:22 +0100)
Commit 15e7e5c1ebf5 ("ARM: 7749/1: spinlock: retry trylock operation if
strex fails on free lock") modified our arch_spin_trylock to retry the
acquisition if the lock appeared uncontended, but the strex failed.

This patch does the same for rwlocks, which were missed by the original
patch.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
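
For illustration only (not part of the commit), the same retry-on-spurious-failure
idea can be sketched with the portable GCC/Clang __atomic builtins: on ARM, a
*weak* compare-exchange compiles to an ldrex/strex pair and may fail even when
the lock is free, so the caller must loop for as long as the lock still looks
uncontended. The demo_lock_t type and demo_write_trylock function below are
hypothetical names for this sketch, not kernel APIs.

#include <stdbool.h>

typedef struct { unsigned long lock; } demo_lock_t;

static inline bool demo_write_trylock(demo_lock_t *rw)
{
	unsigned long expected;

	do {
		expected = 0;	/* proceed only if the lock looks free */
		if (__atomic_compare_exchange_n(&rw->lock, &expected,
						0x80000000UL,
						true,	/* weak: may fail spuriously */
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return true;	/* write lock taken */
	} while (expected == 0);	/* lock was free but the store failed: retry */

	return false;		/* genuinely contended */
}

On failure, __atomic_compare_exchange_n writes the observed value back into
expected, so the loop condition distinguishes a spurious strex failure
(expected still 0) from real contention, exactly as the patch's
"while (res)" / "if (!contended)" pair does.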
arch/arm/include/asm/spinlock.h

index 7ed43f68e04495479561d792c0e6dd8019213444..b07c09e5a0ac86c6ddd5f7bc9ba25425c784e147 100644 (file)
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%1]\n"
-"      teq     %0, #0\n"
-"      strexeq %0, %2, [%1]"
-       : "=&r" (tmp)
-       : "r" (&rw->lock), "r" (0x80000000)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       teq     %0, #0\n"
+               "       strexeq %1, %3, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock), "r" (0x80000000)
+               : "cc");
+       } while (res);
 
-       if (tmp == 0) {
+       if (!contended) {
                smp_mb();
                return 1;
        } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp, tmp2 = 1;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%2]\n"
-"      adds    %0, %0, #1\n"
-"      strexpl %1, %0, [%2]\n"
-       : "=&r" (tmp), "+r" (tmp2)
-       : "r" (&rw->lock)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       adds    %0, %0, #1\n"
+               "       strexpl %1, %0, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock)
+               : "cc");
+       } while (res);
 
-       smp_mb();
-       return tmp2 == 0;
+       /* If the lock is negative, then it is already held for write. */
+       if (contended < 0x80000000) {
+               smp_mb();
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /* read_can_lock - would read_trylock() succeed? */
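
As a companion sketch building on the hypothetical demo_lock_t above (again,
not the kernel API), the read-side analogue of the new arch_read_trylock
logic: readers atomically increment the counter, but back off if the value has
the 0x80000000 write bit set, which is what the strexpl store-if-positive and
the "contended < 0x80000000" test check in the patch.

static inline bool demo_read_trylock(demo_lock_t *rw)
{
	unsigned long old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	do {
		if (old & 0x80000000UL)
			return false;	/* already held for write */
	} while (!__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
					      true,	/* weak: retry spurious failures */
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));

	return true;			/* reader count bumped */
}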