/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

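/*
 * Illustrative usage (example names only, not part of this header):
 *
 *	static atomic_t nr_callers = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_callers, 0);
 *	pr_info("callers: %d\n", atomic_read(&nr_callers));
 *
 * Note that atomic_read() and atomic_set() imply no memory barriers;
 * callers needing ordering must add them explicitly.
 */
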
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

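/*
 * The pattern above is common to all the ARMv6+ routines in this file:
 * ldrex loads the counter and marks the address for exclusive access;
 * strex stores the updated value only if no other agent touched the
 * location in between, writing 0 to its status register on success and
 * 1 on failure; teq/bne then retry the whole sequence until the store
 * wins.  atomic_add() itself implies no memory barrier - only the
 * value-returning variants below do.
 */
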
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

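/*
 * The smp_mb() calls bracketing the ldrex/strex sequence give
 * atomic_add_return() full-barrier semantics, matching the kernel-wide
 * rule that atomic operations which return a value are fully ordered
 * (see Documentation/memory-barriers.txt).
 */
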
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

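/*
 * Illustrative cmpxchg-loop usage (sketch only; LIMIT is a made-up
 * name): a saturating increment capped at some limit, built on
 * atomic_cmpxchg():
 *
 *	int old = atomic_read(&v);
 *	while (old < LIMIT) {
 *		int seen = atomic_cmpxchg(&v, old, old + 1);
 *		if (seen == old)
 *			break;		(our update won)
 *		old = seen;		(lost the race, retry)
 *	}
 */
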
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

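/*
 * Pre-ARMv6 CPUs lack the ldrex/strex exclusive monitor, so the
 * fallbacks below get their atomicity by disabling interrupts around a
 * plain read-modify-write.  That is only safe against other code on
 * the same CPU, hence the #error above for SMP configurations.
 */
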
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

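/*
 * atomic_xchg() is built on the generic xchg() helper, provided here by
 * <asm/system.h> (included above), which uses swp or ldrex/strex as
 * appropriate for the architecture version.
 */
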
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}

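/*
 * atomic_add_unless() returns non-zero iff it performed the add.  The
 * classic use is "take a reference only if the object is still live",
 * i.e. the atomic_inc_not_zero() idiom (sketch, example field name):
 *
 *	if (!atomic_add_unless(&obj->refcount, 1, 0))
 *		return NULL;	(refcount already hit zero, object dying)
 */
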
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

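/*
 * The test/return wrappers above inherit full-barrier semantics from
 * the _return primitives; plain atomic_inc()/atomic_dec() do not.
 * Typical refcounting sketch (illustrative names):
 *
 *	atomic_inc(&obj->refs);			(take a reference)
 *	...
 *	if (atomic_dec_and_test(&obj->refs))
 *		release_obj(obj);		(last user frees it)
 */
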
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

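/*
 * ldrexd/strexd require a doubleword-aligned address, hence the
 * explicit __aligned(8) on the counter: depending on the ABI, a bare
 * u64 inside a struct may be guaranteed only 4-byte alignment.
 */
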
static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

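/*
 * ldrexd loads into a register pair.  In the asm above, %0 names the
 * low word of the 64-bit "result" and GCC's %H operand modifier names
 * its companion high register, so "%0, %H0" spells out the full pair
 * for ldrexd/strexd.
 */
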
static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

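/*
 * A 64-bit add on 32-bit ARM is done in two halves: adds sets the
 * carry flag from the low words, and adc folds that carry into the
 * high words.  The subtraction variants below mirror this with
 * subs/sbc and the borrow flag.
 */
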
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

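/*
 * Note the double compare in atomic64_cmpxchg(): teq checks the low
 * words and teqeq the high words, so strexdeq only attempts the store
 * when both halves of the old value matched.
 */
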
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

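/*
 * atomic64_dec_if_positive() speculatively decrements the value and
 * tests the sign of the high word (bmi); if the result went negative
 * it skips the strexd, leaving the counter untouched, and returns the
 * negative result so the caller can detect the failed decrement.
 */
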
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

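/*
 * As with the 32-bit version, atomic64_add_unless() returns non-zero
 * only if the add was performed.  The trailing smp_mb() is issued only
 * on that success path: when the value equalled u, nothing was written,
 * so no ordering against the skipped update is required.
 */
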
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */