/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

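/*
 * For illustration only (not part of the original header): a hedged sketch
 * of how these accessors behave; 'example_counter' is hypothetical.
 *
 *	static atomic_t example_counter = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_counter, 5);		// plain WRITE_ONCE() store
 *	int seen = atomic_read(&example_counter);	// plain READ_ONCE() load
 *
 * Both are relaxed: they guarantee an untorn, exactly-once access, but
 * provide no memory ordering against other accesses.
 */
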
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed

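/*
 * For illustration only: the canonical compare-and-swap retry loop built on
 * this primitive. 'example_inc_unless_max' is hypothetical; the fully
 * ordered atomic_cmpxchg() is derived from the _relaxed version by the
 * generic atomic layer.
 *
 *	static inline void example_inc_unless_max(atomic_t *v)
 *	{
 *		int old = atomic_read(v);
 *		int prev;
 *
 *		for (;;) {
 *			if (old == INT_MAX)
 *				return;			// saturated: do nothing
 *			prev = atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				return;			// swap succeeded
 *			old = prev;			// raced: retry with new value
 *		}
 *	}
 */
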
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

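/*
 * For illustration only: both implementations of __atomic_add_unless()
 * return the value the counter held before the call, so callers can tell
 * whether the add happened. A hedged sketch of refcount-style use
 * ('example_get_ref' is hypothetical):
 *
 *	static inline bool example_get_ref(atomic_t *refs)
 *	{
 *		// take a reference unless the object is already dead (0)
 *		return __atomic_add_unless(refs, 1, 0) != 0;
 *	}
 */
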
#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

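/*
 * A summarising note (not from the original source): with LPAE, ldrd/strd to
 * a naturally aligned doubleword are single-copy atomic, so plain loads and
 * stores suffice above. Without LPAE a 64-bit store may tear, so
 * atomic64_set() must be an ldrexd/strexd loop instead.
 */
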
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif