/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

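/*
 * Illustrative sketch only (not part of this header): a minimal resource
 * count built from the operations defined below. The nr_users name and
 * the wrappers are hypothetical.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void get_user_slot(void) { atomic_add(1, &nr_users); }
 *	void put_user_slot(void) { atomic_sub(1, &nr_users); }
 *	int  nr_user_slots(void) { return atomic_read(&nr_users); }
 */
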
/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. Avoid redundant back-to-back fences in the
 * _acquire and _fence versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
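
/*
 * For reference, the generic fallback layer (include/linux/atomic-fallback.h)
 * composes the barriered variants from the _relaxed ones roughly like this
 * (simplified sketch):
 *
 *	static inline int atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 *
 * With the hooks above defined empty, the barrier already emitted inside
 * the _relaxed variant is not followed by a redundant second fence.
 */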

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
{									\
	s64 temp;							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
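
/*
 * For illustration, ATOMIC_OPS(add) expands ATOMIC_OP(add, addl) into a
 * function equivalent to the following (sketch of the generated code):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long temp;
 *		__asm__ __volatile__(
 *		"1:	ldl_l %0,%1\n"
 *		"	addl %0,%2,%0\n"
 *		"	stl_c %0,%1\n"
 *		"	beq %0,2f\n"
 *		".subsection 2\n"
 *		"2:	br 1b\n"
 *		".previous"
 *		:"=&r" (temp), "=m" (v->counter)
 *		:"Ir" (i), "m" (v->counter));
 *	}
 *
 * along with atomic_add_return_relaxed(), atomic_fetch_add_relaxed() and
 * the atomic64_*() counterparts built on ldq_l/stq_c and addq.
 */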

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

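/*
 * atomic_cmpxchg() is typically used in a read-modify-write retry loop.
 * Sketch only; saturating_inc() and its max argument are hypothetical:
 *
 *	static inline void saturating_inc(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *		int seen;
 *
 *		while (old < max) {
 *			seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 *
 * On failure the cmpxchg returns the value it found, so the loop retries
 * from that value instead of re-reading the counter.
 */
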
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

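/*
 * A common use of this primitive (illustrative sketch; the wrapper name
 * is hypothetical): take a reference only while the count is non-zero,
 * i.e. the object is not already on its way to being freed.
 *
 *	static inline bool obj_get_unless_zero(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */
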
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
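
/*
 * Illustrative use (sketch; try_down() and its counter are hypothetical):
 * consume one unit only if one is available. Since the return value is
 * the old value minus one even when no decrement took place, a negative
 * result means the counter was already zero or below.
 *
 *	static inline bool try_down(atomic64_t *count)
 *	{
 *		return atomic64_dec_if_positive(count) >= 0;
 *	}
 */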

#endif /* _ALPHA_ATOMIC_H */