/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
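
/*
 * Illustrative only (not part of this header's interface): assuming
 * L1_CACHE_BYTES == 64, two atomic_ts a cacheline apart hash to
 * different locks, while addresses 4 cachelines apart wrap around to
 * the same lock:
 *
 *	ATOMIC_HASH(0x1000);	// 64 & 3 -> &__atomic_hash[0]
 *	ATOMIC_HASH(0x1040);	// 65 & 3 -> &__atomic_hash[1]
 *	ATOMIC_HASH(0x1100);	// 68 & 3 -> &__atomic_hash[0] again
 */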

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

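/*
 * Illustrative only: atomic_cmpxchg() supports lock-free read-modify-write
 * retry loops.  A "run exactly once" pattern might look like this
 * (init_done and do_one_time_init() are made-up names for the example):
 *
 *	static atomic_t init_done = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&init_done, 0, 1) == 0)
 *		do_one_time_init();	// only the winner of the race gets here
 */
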
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
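
/*
 * Illustrative only: the classic use of the "add unless" pattern is taking
 * a reference only while an object's refcount is still non-zero (obj and
 * refs are made-up names for the example):
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) != 0)
 *		use(obj);	// old count was non-zero, reference taken
 */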

#define ATOMIC_OP(op, c_op)					\
static __inline__ void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long flags;					\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	v->counter c_op i;					\
	_atomic_spin_unlock_irqrestore(v, flags);		\
}

#define ATOMIC_OP_RETURN(op, c_op)				\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long flags;					\
	int ret;						\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	ret = (v->counter c_op i);				\
	_atomic_spin_unlock_irqrestore(v, flags);		\
								\
	return ret;						\
}

#define ATOMIC_FETCH_OP(op, c_op)				\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned long flags;					\
	int ret;						\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	ret = v->counter;					\
	v->counter c_op i;					\
	_atomic_spin_unlock_irqrestore(v, flags);		\
								\
	return ret;						\
}

#define ATOMIC_OPS(op, c_op)					\
	ATOMIC_OP(op, c_op)					\
	ATOMIC_OP_RETURN(op, c_op)				\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
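
/*
 * Expansion sketch (for reference, not additional definitions): each
 * ATOMIC_OPS() invocation above generates three functions, e.g.
 * ATOMIC_OPS(add, +=) yields
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);
 *	int  atomic_fetch_add(int i, atomic_t *v);
 *
 * each performing its read-modify-write under the hashed spinlock.
 */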

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)					\
	ATOMIC_OP(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
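
/*
 * Illustrative only: declaring an atomic_t and driving it with the helpers
 * above (pending and wake_up_waiters() are made-up names for the example):
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_inc(&pending);			// submit one work item
 *	if (atomic_dec_and_test(&pending))	// true for the last completion
 *		wake_up_waiters();
 */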

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)					\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)	\
{								\
	unsigned long flags;					\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	v->counter c_op i;					\
	_atomic_spin_unlock_irqrestore(v, flags);		\
}

#define ATOMIC64_OP_RETURN(op, c_op)				\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
{								\
	unsigned long flags;					\
	s64 ret;						\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	ret = (v->counter c_op i);				\
	_atomic_spin_unlock_irqrestore(v, flags);		\
								\
	return ret;						\
}

#define ATOMIC64_FETCH_OP(op, c_op)				\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
{								\
	unsigned long flags;					\
	s64 ret;						\
								\
	_atomic_spin_lock_irqsave(v, flags);			\
	ret = v->counter;					\
	v->counter c_op i;					\
	_atomic_spin_unlock_irqrestore(v, flags);		\
								\
	return ret;						\
}

#define ATOMIC64_OPS(op, c_op)					\
	ATOMIC64_OP(op, c_op)					\
	ATOMIC64_OP_RETURN(op, c_op)				\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)					\
	ATOMIC64_OP(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the add was performed, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
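
/*
 * Illustrative only: atomic64_inc_not_zero() is the usual building block
 * for taking a reference during a lockless lookup (entry and refcnt are
 * made-up names for the example):
 *
 *	if (!atomic64_inc_not_zero(&entry->refcnt))
 *		return NULL;	// object is already being torn down
 */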

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
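
/*
 * Illustrative only: atomic64_dec_if_positive() suits counters that must
 * never drop below zero, e.g. consuming from a budget (budget is a made-up
 * name for the example):
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;	// nothing left; the counter was not touched
 */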

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */