/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }
/*
 * Since *_return_relaxed and {cmp,}xchg_relaxed are implemented with a
 * "bne-" instruction at the end, an isync is enough as an acquire barrier
 * on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
	__ret; \
})

#define __atomic_op_release(op, args...) \
({ \
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
	op##_relaxed(args); \
})
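/*
 * Illustrative note (not part of the original header): the generic atomic
 * layer is expected to build the acquire/release variants from the _relaxed
 * primitives defined below by way of these helpers, roughly as:
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *	#define atomic_add_return_release(...) \
 *		__atomic_op_release(atomic_add_return, __VA_ARGS__)
 *
 * so only the relaxed forms need hand-written asm in this file.
 */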
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \
{ \
	int t; \
	__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
	int t; \
	__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
	return t; \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
	int res, t; \
	__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
	return res; \
}
#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
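/*
 * Illustrative note (not part of the original header): each ATOMIC_OPS()
 * line above expands to three functions; for ATOMIC_OPS(add, add) that is
 * roughly:
 *
 *	void atomic_add(int a, atomic_t *v);
 *	int  atomic_add_return_relaxed(int a, atomic_t *v);
 *	int  atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * each built around a lwarx/stwcx. retry loop with the given asm_op in the
 * middle.
 */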
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n"
" addic %0,%0,1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
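/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 *
 *	atomic_t deficit = ATOMIC_INIT(-3);
 *	...
 *	if (atomic_inc_and_test(&deficit))
 *		pr_debug("deficit cleared\n");
 */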
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n"
" addic %0,%0,-1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
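/*
 * Illustrative sketch (hypothetical helper, not part of this header): a
 * generic compare-and-swap retry loop built on atomic_cmpxchg(), adding @a
 * unless the counter equals @u.  The larx/stcx. version in
 * __atomic_add_unless() below does the same job in a single reservation
 * loop instead of looping over full cmpxchg calls.
 *
 *	static inline int add_unless_cas(atomic_t *v, int a, int u)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old != u) {
 *			int seen = atomic_cmpxchg(v, old, old + a);
 *
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *		return old;
 *	}
 */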
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n"
" cmpw 0,%0,%3\n"
" beq 2f\n"
" add %0,%2,%0\n"
" stwcx. %0,0,%1\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n"
" cmpwi 0,%0,0\n"
" beq- 2f\n"
" addic %1,%0,1\n"
" stwcx. %1,0,%2\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
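/*
 * Illustrative usage sketch (hypothetical caller and fields, not part of
 * this header): take a reference only while the object is still live, the
 * usual pattern for lockless lookup:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&cache, id);
 *	if (obj && !atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;
 *	rcu_read_unlock();
 */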
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n"
" cmpwi %0,1\n"
" addi %0,%0,-1\n"
" blt- 2f\n"
" stwcx. %0,0,%1\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
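/*
 * Illustrative usage sketch (hypothetical caller and fields, not part of
 * this header): consume one token only if any are left.
 *
 *	if (atomic_dec_if_positive(&pool->tokens) < 0)
 *		return -EBUSY;
 *
 * A negative return value means the counter was already zero (or negative)
 * and was not decremented.
 */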
#ifdef __powerpc64__

#define ATOMIC64_INIT(i) { (i) }
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long a, atomic64_t *v) \
{ \
	long t; \
	__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline long \
atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
{ \
	long t; \
	__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
	return t; \
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline long \
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
{ \
	long res, t; \
	__asm__ __volatile__( \
"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
" stdcx. %1,0,%4\n" \
" bne- 1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
	return res; \
}
#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc\n"
" addic %0,%0,1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec\n"
" addic %0,%0,-1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n"
" addic. %0,%0,-1\n"
" blt- 2f\n"
" stdcx. %0,0,%1\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was done, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_add_unless\n"
" cmpd 0,%0,%3\n"
" beq 2f\n"
" add %0,%2,%0\n"
" stdcx. %0,0,%1\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n"
" cmpdi 0,%0,0\n"
" beq- 2f\n"
" addic %1,%0,1\n"
" stdcx. %1,0,%2\n"
" bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */