/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>	/* atomic_ops_lock()/atomic_ops_unlock() */

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n" /* retry if scond lost the reservation */ \
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

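/*
 * For illustration (a sketch, not generated here verbatim):
 * ATOMIC_OP(add, +=, add), instantiated via ATOMIC_OPS(add, +=, add)
 * further below, expands to roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 *
 * i.e. a pure LL/SC retry loop: reload and redo the op until scond succeeds.
 */
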
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

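/*
 * Unlike the void ops, the *_return ops must act as full memory barriers
 * (the kernel-wide rule for value-returning atomics), hence the smp_mb()
 * pair. A rough C-level sketch ("scond_ok()" is a stand-in for the
 * scond/bnz outcome, not a real helper):
 *
 *	smp_mb();
 *	do {
 *		val = v->counter;		// llock
 *		val = val <op> i;		// asm_op
 *	} while (!scond_ok(&v->counter, val));	// scond + bnz 1b
 *	smp_mb();
 *	return val;				// the new value
 */
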
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * which is protected by a lock.
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

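/*
 * Illustrative interleaving of the clobber this prevents: on !LLSC SMP,
 * atomic_add() is a lock-protected load/modify/store. An unlocked
 * atomic_set() could land between that load and store and simply be
 * overwritten:
 *
 *	CPU0 (atomic_add(1, &v))	CPU1 (atomic_set(&v, 0))
 *	lock; tmp = v.counter;	// 5
 *					v.counter = 0;	// lost below
 *	v.counter = tmp + 1;	// 6
 *	unlock;
 *
 * With atomic_set() also taking atomic_ops_lock(), the store serializes
 * either before or after the whole emulated RMW, never in the middle.
 */
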
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
									\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
									\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

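/*
 * The instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return(), atomic_and(), atomic_andnot(),
 * atomic_or() and atomic_xor(). Illustrative usage (resulting values
 * assume no concurrent updaters):
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &v);			// counter == 3
 *	atomic_andnot(1, &v);			// counter == 2 (3 & ~1, via BIC)
 *	if (atomic_sub_return(2, &v) == 0)	// returns the new value: 0
 *		...;
 */
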
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"	/* .di: direct (cache bypass) access */
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;	/* HW returns the old value; compute the new one */ \
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

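/*
 * Illustrative usage ("obj" and its refcnt field are hypothetical): take
 * a reference only if the object is still live. Note the macro returns
 * the *old* value, so failure shows up as the exception value itself:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// refcnt was 0: object already dying
 *
 * This is the building block for atomic_inc_not_zero() below.
 */
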
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)	{ (i) }

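/*
 * Typical refcount-style usage of the helpers above ("free_obj()" is a
 * hypothetical destructor):
 *
 *	static atomic_t users = ATOMIC_INIT(1);
 *
 *	void get_obj(void)
 *	{
 *		atomic_inc(&users);
 *	}
 *
 *	void put_obj(void)
 *	{
 *		if (atomic_dec_and_test(&users))	// new value reached 0
 *			free_obj();
 *	}
 */
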
#include <asm-generic/atomic64.h>