ia64: Rewrite atomic_add and atomic_sub
Author:     Matthew Wilcox <mawilcox@microsoft.com>
AuthorDate: Thu, 18 Jan 2018 21:52:17 +0000 (13:52 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 19 Jan 2018 18:47:51 +0000 (10:47 -0800)
Force __builtin_constant_p to evaluate whether the argument to atomic_add
& atomic_sub is constant in the front-end, before the optimisation passes
that can otherwise lead GCC to output a call to
__bad_increment_for_ia64_fetch_and_add().

See GCC bugzilla 83653.
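
The fix relies on GCC folding __builtin_constant_p immediately when it
appears in the initializer of a static object, in the front-end, before
any optimisation pass can second-guess the branch.  Below is a minimal
user-space sketch of the same pattern; FAST_OK(), fast_add(), slow_add()
and add_return() are made-up stand-ins for __ia64_atomic_const(),
ia64_fetch_and_add(), ia64_atomic_add() and atomic_add_return(), and the
whole thing is GNU C, so build it with gcc:

#include <stdio.h>

/* Illustrative stand-ins: fast_add() plays the role of the fetchadd
 * fast path, slow_add() the role of the cmpxchg-loop fallback. */
static int fast_add(int i, int *p) { return *p += i; }
static int slow_add(int i, int *p) { return *p += i; }

/* Same shape as __ia64_atomic_const(): nonzero only when i is a
 * compile-time constant with one of the supported values. */
#define FAST_OK(i) (__builtin_constant_p(i) ?                           \
        ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||               \
         (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0)

#define add_return(i, p)                                                \
({                                                                      \
        int __i = (i);                                                  \
        /* A static initializer is folded by the front-end, so      */  \
        /* __builtin_constant_p is decided before optimisation.     */  \
        static const int __fast = FAST_OK(i);                           \
        __fast ? fast_add(__i, p) : slow_add(__i, p);                   \
})

int main(void)
{
        int counter = 0, n = 3;

        add_return(4, &counter);        /* constant: fast path chosen */
        add_return(n, &counter);        /* variable: fallback chosen  */
        printf("%d\n", counter);        /* prints 7 */
        return 0;
}

The same result, 7, comes out with or without -O2; only the code selected
at each call site differs.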

Signed-off-by: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/include/asm/atomic.h

index 28e02c99be6d72efca8128d8a11011e20bd89936..762eeb0fcc1dcaec27a4dff140274113a2c31fbf 100644
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v)                         \
 ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
-#define atomic_add_return(i,v)                                         \
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i) __builtin_constant_p(i) ?               \
+               ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||       \
+                (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+
+#define atomic_add_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic_add(__ia64_aar_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :      \
+                               ia64_atomic_add(__i, v);                \
 })
 
-#define atomic_sub_return(i,v)                                         \
+#define atomic_sub_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :     \
+                               ia64_atomic_sub(__i, v);                \
 })
+#else
+#define atomic_add_return(i, v)        ia64_atomic_add(i, v)
+#define atomic_sub_return(i, v)        ia64_atomic_sub(i, v)
+#endif
 
 #define atomic_fetch_add(i,v)                                          \
 ({                                                                     \
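
For context on what the misfire produced: ia64_fetch_and_add() only
handles the eight increments that the fetchadd instruction can encode,
and it rejects anything else with the kernel's usual link-time
assertion, a call to the extern function
__bad_increment_for_ia64_fetch_and_add() that is declared but never
defined.  A rough sketch of that pattern, with __bad_increment() and
fetch_add_checked() as assumed names and the generic
__sync_fetch_and_add() builtin standing in for fetchadd:

/* Declared but never defined anywhere: if the compiler cannot prove
 * this call dead at compile time, the build fails at link time,
 * which is the error GCC bug 83653 made appear spuriously. */
extern int __bad_increment(void);

static inline int fetch_add_checked(int i, int *p)
{
        switch (i) {
        case 1: case 4: case 8: case 16:
        case -1: case -4: case -8: case -16:
                /* stand-in for the ia64 fetchadd instruction */
                return __sync_fetch_and_add(p, i);
        default:
                return __bad_increment();
        }
}

With __ia64_atomic_const() evaluated in the front-end, the default:
branch is provably dead whenever the macro picks the fast path, so the
undefined symbol never survives to the linker.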