/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

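/*
 * Constraint notes for the asm blocks below: "=&r" marks an
 * early-clobber output register, "Ir" lets i be either a
 * data-processing immediate or a register, and "+Qo" tells the
 * compiler that v->counter is read and written in memory, so no
 * "memory" clobber is needed.
 */
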
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

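/*
 * Only the _relaxed forms are defined here; the generic atomic
 * machinery builds the fully-ordered and _acquire/_release variants
 * on top of them with explicit barriers. The _return variants above
 * yield the updated value, the fetch_ variants the prior one.
 */
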
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

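/*
 * strexeq below only attempts the store when the comparison matched;
 * res stays zero on a miscompare, so the loop retries only when an
 * exclusive store was attempted and lost its reservation.
 */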
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

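	/* Only issue the trailing barrier when an update took place. */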
	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

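/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so the ops below fall back to plain
 * read-modify-write sequences made atomic against interrupts by
 * disabling IRQs; the #error above rules out SMP on such cores.
 */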
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

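/*
 * The bitwise ops need no value-returning form beyond fetch_*, so
 * ATOMIC_OPS is redefined below without ATOMIC_OP_RETURN. ARM's bic
 * (bit clear) instruction implements andnot directly.
 */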
#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

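/*
 * 64-bit atomics on 32-bit ARM: without LPAE the only single-copy
 * atomic 64-bit accesses are ldrexd/strexd pairs, so even read and
 * set need exclusive sequences; with LPAE, aligned 64-bit ldrd/strd
 * are themselves single-copy atomic.
 */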
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

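/*
 * Operand modifiers used below: %Q0 names the register holding the
 * least significant 32 bits of a 64-bit operand, %R0 the most
 * significant 32 bits, and %H0 the higher-numbered register of the
 * pair, as required by ldrexd/strexd.
 */
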
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long						\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

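/*
 * The 64-bit arithmetic is split across the register pair: adds/subs
 * update the low word and set the carry flag, then adc/sbc fold the
 * carry into the high word.
 */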
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

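/*
 * A 64-bit compare needs both halves to match: teq checks the low
 * words and teqeq, executed only on a low-word match, checks the high
 * words before strexdeq attempts the conditional store.
 */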
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

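/*
 * atomic64_dec_if_positive() decrements with subs/sbc and, when the
 * result has gone negative, branches to label 2 without storing (bmi
 * tests the sign of the high word); it is fully ordered, hence the
 * smp_mb() on both sides.
 */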
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif