/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif
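
/*
 * Illustration only (not assembled): with R10000_LLSC_WAR enabled, the
 * retry branch of the ll/sc loops below becomes a branch-likely, e.g.
 *
 *	1:	ll	$t0, 0($a1)
 *		addu	$t0, $t0, $a0
 *		sc	$t0, 0($a1)
 *		beqzl	$t0, 1b
 *
 * while unaffected CPUs use a plain beqz. Register names here are invented.
 */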

#define ATOMIC_INIT(i)	  { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)	  READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)  WRITE_ONCE((v)->counter, (i))
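
/*
 * Illustrative usage (hypothetical caller, not part of this header):
 *
 *	static atomic_t refs = ATOMIC_INIT(0);
 *
 *	atomic_set(&refs, 1);
 *	pr_info("refs = %d\n", atomic_read(&refs));
 *
 * Neither macro implies a memory barrier; both are plain tear-free
 * accesses via READ_ONCE()/WRITE_ONCE().
 */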

#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	if (kernel_uses_llsc) { \
		int temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	ll	%0, %1		# atomic_" #op " \n" \
		"	" #asm_op " %0, %2 \n" \
		"	sc	%0, %1 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	.set	pop \n" \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}
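
/*
 * For illustration only: ATOMIC_OP(add, +=, addu), instantiated further
 * down, expands (roughly) to
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *
 * which either runs an ll/addu/sc retry loop on v->counter or, on CPUs
 * without LL/SC, performs "v->counter += i" with interrupts disabled.
 * Nothing is returned and no memory barrier is implied.
 */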

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
	int result; \
	\
	if (kernel_uses_llsc) { \
		int temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	ll	%1, %2		# atomic_" #op "_return \n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	sc	%0, %2 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	.set	pop \n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
	\
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
	int result; \
	\
	if (kernel_uses_llsc) { \
		int temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	ll	%1, %2		# atomic_fetch_" #op " \n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	sc	%0, %2 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	.set	pop \n" \
		"	move	%0, %1 \n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		result = v->counter; \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
	\
	return result; \
}
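
/*
 * Note on the fetch variant above: the trailing "move %0, %1" copies the
 * value originally loaded by ll into the result register, so
 * atomic_fetch_<op>_relaxed() returns the counter's value from before the
 * operation was applied. A hypothetical caller, for illustration:
 *
 *	int old = atomic_fetch_add_relaxed(1, &v);	(old value, pre-add)
 */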

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
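
/*
 * Only the _relaxed forms are defined here. The generic <linux/atomic.h>
 * layer derives the fully ordered variants; conceptually (a sketch of its
 * __atomic_op_fence() wrapper, not code from this file):
 *
 *	smp_mb__before_atomic();
 *	ret = atomic_add_return_relaxed(i, v);
 *	smp_mb__after_atomic();
 */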

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	push \n"
		"	.set	"MIPS_ISA_LEVEL" \n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	.set	pop \n"
		"	subu	%0, %1, %3 \n"
		"	move	%1, %0 \n"
		"	bltz	%0, 1f \n"
		"	.set	push \n"
		"	.set	"MIPS_ISA_LEVEL" \n"
		"	sc	%1, %2 \n"
		"\t" __scbeqz "	%1, 1b \n"
		"1: \n"
		"	.set	pop \n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
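
/*
 * Illustrative use (hypothetical caller): reserve 4 units of a budget
 * only if that many are available:
 *
 *	if (atomic_sub_if_positive(4, &budget) < 0)
 *		return -EBUSY;
 *
 * A negative return means the subtraction was not performed. The function
 * is fully ordered (barriers both before and after the LL/SC sequence).
 */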

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
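
/*
 * atomic_cmpxchg() supports lock-free read-modify-write loops. A
 * hypothetical saturating increment, shown for illustration only:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		if (old == INT_MAX)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */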

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	if (kernel_uses_llsc) { \
		long temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	lld	%0, %1		# atomic64_" #op " \n" \
		"	" #asm_op " %0, %2 \n" \
		"	scd	%0, %1 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	.set	pop \n" \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
	long result; \
	\
	if (kernel_uses_llsc) { \
		long temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	lld	%1, %2		# atomic64_" #op "_return\n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	scd	%0, %2 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	.set	pop \n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
	\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
	long result; \
	\
	if (kernel_uses_llsc) { \
		long temp; \
		\
		__asm__ __volatile__( \
		"	.set	push \n" \
		"	.set	"MIPS_ISA_LEVEL" \n" \
		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n" \
		"	" #asm_op " %0, %1, %3 \n" \
		"	scd	%0, %2 \n" \
		"\t" __scbeqz "	%0, 1b \n" \
		"	move	%0, %1 \n" \
		"	.set	pop \n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
		\
		raw_local_irq_save(flags); \
		result = v->counter; \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
	\
	return result; \
}

#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_OP_RETURN(op, c_op, asm_op) \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 *			      variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	push \n"
		"	.set	"MIPS_ISA_LEVEL" \n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3 \n"
		"	move	%1, %0 \n"
		"	bltz	%0, 1f \n"
		"	scd	%1, %2 \n"
		"\t" __scbeqz "	%1, 1b \n"
		"1: \n"
		"	.set	pop \n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */