locking/atomic/x86: Un-macro-ify atomic ops implementation
Author:     Dmitry Vyukov <dvyukov@google.com>
AuthorDate: Sat, 17 Jun 2017 09:15:27 +0000 (11:15 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 28 Jun 2017 16:55:55 +0000 (18:55 +0200)
CPP turns perfectly readable code into a much-harder-to-read syntactic soup.

Ingo suggested writing them out as-is in C and ignoring the higher line count.

Do this.

(As a side effect, plain C functions will be easier to KASAN-instrument as well.)
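
The fetch_* variants below are written out as compare-and-exchange retry
loops. A minimal user-space sketch of the same pattern, using the C11
<stdatomic.h> builtins instead of the kernel's atomic_try_cmpxchg() (the
helper name and the user-space setting are illustrative assumptions, not
part of this patch):

  #include <stdatomic.h>

  /* Sketch: fetch-and-AND via a compare-exchange retry loop, mirroring
   * the hand-written atomic_fetch_and() below (illustrative only). */
  static int fetch_and_sketch(atomic_int *v, int i)
  {
          int val = atomic_load(v);

          /* On failure the compare-exchange reloads 'val' with the
           * current contents, so each retry uses a fresh snapshot. */
          while (!atomic_compare_exchange_weak(v, &val, val & i))
                  ;

          return val;     /* the old value, as all fetch_* ops return */
  }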

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kasan-dev@googlegroups.com
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/a35b983dd3be937a3cf63c4e2db487de2cdc7b8f.1497690003.git.dvyukov@google.com
[ Beautified the C code some more and twiddled the changelog
  to mention the line count increase and the KASAN benefit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 33380b8714634f93ef77290e8601e6834701ddf1..0874ebda3069d9eb3a8811f8d05951e929ab7d18 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
        return xchg(&v->counter, new);
 }
 
-#define ATOMIC_OP(op)                                                  \
-static inline void atomic_##op(int i, atomic_t *v)                     \
-{                                                                      \
-       asm volatile(LOCK_PREFIX #op"l %1,%0"                           \
-                       : "+m" (v->counter)                             \
-                       : "ir" (i)                                      \
-                       : "memory");                                    \
+static inline void atomic_and(int i, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "andl %1,%0"
+                       : "+m" (v->counter)
+                       : "ir" (i)
+                       : "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+       int val = atomic_read(v);
+
+       do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+       return val;
 }
 
-#define ATOMIC_FETCH_OP(op, c_op)                                      \
-static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
-{                                                                      \
-       int val = atomic_read(v);                                       \
-       do {                                                            \
-       } while (!atomic_try_cmpxchg(v, &val, val c_op i));             \
-       return val;                                                     \
+static inline void atomic_or(int i, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "orl %1,%0"
+                       : "+m" (v->counter)
+                       : "ir" (i)
+                       : "memory");
 }
 
-#define ATOMIC_OPS(op, c_op)                                           \
-       ATOMIC_OP(op)                                                   \
-       ATOMIC_FETCH_OP(op, c_op)
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+       int val = atomic_read(v);
 
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+       do { } while (!atomic_try_cmpxchg(v, &val, val | i));
 
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+       return val;
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "xorl %1,%0"
+                       : "+m" (v->counter)
+                       : "ir" (i)
+                       : "memory");
+}
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+       int val = atomic_read(v);
+
+       do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+       return val;
+}
 
 /**
  * __atomic_add_unless - add unless the number is already a given value
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c = atomic_read(v);
+
        do {
                if (unlikely(c == u))
                        break;
        } while (!atomic_try_cmpxchg(v, &c, c + a));
+
        return c;
 }
 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705fb303ba5238f4f812fdb071277addbce8..9e206f31ce2a6d0bbb2a036e25cb221cad6376c9 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-#define ATOMIC64_OP(op, c_op)                                          \
-static inline void atomic64_##op(long long i, atomic64_t *v)           \
-{                                                                      \
-       long long old, c = 0;                                           \
-       while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)           \
-               c = old;                                                \
+static inline void atomic64_and(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+               c = old;
 }
 
-#define ATOMIC64_FETCH_OP(op, c_op)                                    \
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)        \
-{                                                                      \
-       long long old, c = 0;                                           \
-       while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)           \
-               c = old;                                                \
-       return old;                                                     \
+static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+               c = old;
+
+       return old;
 }
 
-ATOMIC64_FETCH_OP(add, +)
+static inline void atomic64_or(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
 
-#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
+       while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+               c = old;
+}
+
+static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+               c = old;
+
+       return old;
+}
 
-#define ATOMIC64_OPS(op, c_op)                                         \
-       ATOMIC64_OP(op, c_op)                                           \
-       ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_xor(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+               c = old;
+}
 
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+               c = old;
+
+       return old;
+}
 
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+       long long old, c = 0;
+
+       while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
+               c = old;
+
+       return old;
+}
+
+#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6189a433c9a93946dbabf2cb8a6ca6b50a7bdc93..8db8879a6d8cbb31d4d7264b52f289e0170d8b37 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -226,34 +226,55 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        return dec;
 }
 
-#define ATOMIC64_OP(op)                                                        \
-static inline void atomic64_##op(long i, atomic64_t *v)                        \
-{                                                                      \
-       asm volatile(LOCK_PREFIX #op"q %1,%0"                           \
-                       : "+m" (v->counter)                             \
-                       : "er" (i)                                      \
-                       : "memory");                                    \
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "andq %1,%0"
+                       : "+m" (v->counter)
+                       : "er" (i)
+                       : "memory");
 }
 
-#define ATOMIC64_FETCH_OP(op, c_op)                                    \
-static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
-{                                                                      \
-       long val = atomic64_read(v);                                    \
-       do {                                                            \
-       } while (!atomic64_try_cmpxchg(v, &val, val c_op i));           \
-       return val;                                                     \
+static inline long atomic64_fetch_and(long i, atomic64_t *v)
+{
+       long val = atomic64_read(v);
+
+       do {
+       } while (!atomic64_try_cmpxchg(v, &val, val & i));
+       return val;
 }
 
-#define ATOMIC64_OPS(op, c_op)                                         \
-       ATOMIC64_OP(op)                                                 \
-       ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "orq %1,%0"
+                       : "+m" (v->counter)
+                       : "er" (i)
+                       : "memory");
+}
 
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long atomic64_fetch_or(long i, atomic64_t *v)
+{
+       long val = atomic64_read(v);
 
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+       do {
+       } while (!atomic64_try_cmpxchg(v, &val, val | i));
+       return val;
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "xorq %1,%0"
+                       : "+m" (v->counter)
+                       : "er" (i)
+                       : "memory");
+}
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
+{
+       long val = atomic64_read(v);
+
+       do {
+       } while (!atomic64_try_cmpxchg(v, &val, val ^ i));
+       return val;
+}
 
 #endif /* _ASM_X86_ATOMIC64_64_H */