/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }

static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
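
/*
 * Usage sketch (illustrative only, not part of the original header):
 * "nr_events" is an example name, not a real kernel symbol.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);			// plain store
 *	int snapshot = atomic_read(&nr_events);	// plain load
 *
 * Read-modify-write sequences must use the AMO-based helpers defined below
 * (atomic_add() and friends), not a read followed by a set.
 */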

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)				\
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{											\
	__asm__ __volatile__ (								\
		"amo" #asm_op "." #asm_type " zero, %1, %0"				\
		: "+A" (v->counter)							\
		: "r" (I)								\
		: "memory");								\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)			\
        ATOMIC_OP (op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)			\
        ATOMIC_OP (op, asm_op, I, w,  int,   )		\
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
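
/*
 * For illustration only (not part of the upstream file): with
 * !CONFIG_GENERIC_ATOMIC64, ATOMIC_OPS(add, add, i) above expands roughly to
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"amoadd.w zero, %1, %0"
 *			: "+A" (v->counter) : "r" (i) : "memory");
 *	}
 *
 * plus an atomic64_add() variant that uses amoadd.d on the 64-bit counter.
 * The destination register is zero because nothing consumes the old value.
 */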

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)			\
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)	\
{												\
	register c_type ret;									\
	__asm__ __volatile__ (									\
		"amo" #asm_op "." #asm_type #asm_or " %1, %2, %0"				\
		: "+A" (v->counter), "=r" (ret)							\
		: "r" (I)									\
		: "memory");									\
	return ret;										\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)		\
static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v)	\
{												\
	return atomic##prefix##_fetch_##op##c_or(i, v) c_op I;					\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i,      , _relaxed)
ATOMIC_OPS(add, add, +,  i, .aq  , _acquire)
ATOMIC_OPS(add, add, +,  i, .rl  , _release)
ATOMIC_OPS(add, add, +,  i, .aqrl,         )

ATOMIC_OPS(sub, add, +, -i,      , _relaxed)
ATOMIC_OPS(sub, add, +, -i, .aq  , _acquire)
ATOMIC_OPS(sub, add, +, -i, .rl  , _release)
ATOMIC_OPS(sub, add, +, -i, .aqrl,         )

#undef ATOMIC_OPS
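
/*
 * For illustration only (not part of the upstream file): the fully ordered
 * variant ATOMIC_OPS(add, add, +, i, .aqrl, ) above expands (for int) to
 *
 *	static __always_inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ (
 *			"amoadd.w.aqrl %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret) : "r" (i) : "memory");
 *		return ret;
 *	}
 *
 *	static __always_inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return atomic_fetch_add(i, v) + i;
 *	}
 *
 * and similarly for the _relaxed/_acquire/_release suffixes, which use no
 * AQ/RL bits, .aq only, and .rl only, respectively.
 */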

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(and, and, i,      , _relaxed)
ATOMIC_OPS(and, and, i, .aq  , _acquire)
ATOMIC_OPS(and, and, i, .rl  , _release)
ATOMIC_OPS(and, and, i, .aqrl,         )

ATOMIC_OPS( or,  or, i,      , _relaxed)
ATOMIC_OPS( or,  or, i, .aq  , _acquire)
ATOMIC_OPS( or,  or, i, .rl  , _release)
ATOMIC_OPS( or,  or, i, .aqrl,         )

ATOMIC_OPS(xor, xor, i,      , _relaxed)
ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
ATOMIC_OPS(xor, xor, i, .rl  , _release)
ATOMIC_OPS(xor, xor, i, .aqrl,         )

#undef ATOMIC_OPS
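
/*
 * Illustrative note (not from the original file): because the logical ops
 * only generate fetch versions, ATOMIC_OPS(and, and, i, .aq, _acquire)
 * produces atomic_fetch_and_acquire() (and atomic64_fetch_and_acquire() when
 * 64-bit atomics are available), built on "amoand.w.aq"/"amoand.d.aq", with
 * no corresponding *_and_return_acquire() helper.
 */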

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a barrier, but they're OK this way because
 * atomic_*_return is also required to perform a barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix)				\
static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{											\
	return atomic##prefix##_##func_op##_return(i, v) comp_op I;			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I)			\
        ATOMIC_OP (op, func_op, comp_op, I,  int,   )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I)			\
        ATOMIC_OP (op, func_op, comp_op, I,  int,   )		\
        ATOMIC_OP (op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add,  <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS
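
/*
 * For illustration only (not part of the upstream file):
 * ATOMIC_OPS(sub_and_test, sub, ==, 0) above expands (for int) to
 *
 *	static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 *	{
 *		return atomic_sub_return(i, v) == 0;
 *	}
 *
 * so the required full barrier comes from the fully ordered *_return helper.
 */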

#define ATOMIC_OP(op, func_op, I, c_type, prefix)				\
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)	\
{										\
	atomic##prefix##_##func_op(I, v);					\
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)				\
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)	\
{											\
	return atomic##prefix##_fetch_##func_op(I, v);					\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)				\
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)	\
{											\
	return atomic##prefix##_fetch_##op(v) c_op I;					\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP       (op, asm_op,       I,  int,   )		\
        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP       (op, asm_op,       I,  int,   )		\
        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )		\
        ATOMIC_OP       (op, asm_op,       I, long, 64)		\
        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)
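
/*
 * For illustration only (not part of the upstream file):
 * ATOMIC_OPS(inc, add, +, 1) above expands (for int) to
 *
 *	static __always_inline void atomic_inc(atomic_t *v)        { atomic_add(1, v); }
 *	static __always_inline int  atomic_fetch_inc(atomic_t *v)  { return atomic_fetch_add(1, v); }
 *	static __always_inline int  atomic_inc_return(atomic_t *v) { return atomic_fetch_inc(v) + 1; }
 *
 * and ATOMIC_OPS(dec, add, +, -1) does the same with an addend of -1.
 */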

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix)				\
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v)	\
{										\
	return atomic##prefix##_##func_op##_return(v) comp_op I;		\
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP

/* This is required to provide a barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl  %[p],  %[c]\n\t"
		"beq        %[p],  %[u], 1f\n\t"
		"add       %[rc],  %[p], %[a]\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
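
/*
 * Illustrative C equivalent of the LR/SC loop above (sketch only; try_store()
 * is a stand-in for the store-conditional, not a real helper):
 *
 *	do {
 *		prev = v->counter;		// lr.w.aqrl
 *		if (prev == u)
 *			break;			// beq %[p], %[u], 1f
 *		rc = prev + a;			// add
 *	} while (!try_store(&v->counter, rc));	// sc.w.aqrl + bnez retry
 *	return prev;
 */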

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl  %[p],  %[c]\n\t"
		"beq        %[p],  %[u], 1f\n\t"
		"add       %[rc],  %[p], %[a]\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif

/*
 * The extra atomic operations that are constructed from one of the core
 * LR/SC-based operations above.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif
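
/*
 * Usage sketch (illustrative only): atomic_inc_not_zero() is the usual way to
 * take a reference only while an object is still live.  "struct obj" and
 * get_ref() are example names, not symbols from this header.
 *
 *	static bool get_ref(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcount);
 *	}
 */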

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.
 */
/*
 * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
 * assigning the same barrier to both the LR and SC operations, but that might
 * not make any sense.  We're waiting on a memory model specification to
 * determine exactly what the right thing to do is here.
 */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)						\
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n)	\
{												\
	return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or);				\
}												\
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n)		\
{												\
	return __xchg(n, &(v->counter), size, asm_or);						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or)			\
	ATOMIC_OP( int,   , c_or, 4, asm_or)
#else
#define ATOMIC_OPS(c_or, asm_or)			\
	ATOMIC_OP( int,   , c_or, 4, asm_or)		\
	ATOMIC_OP(long, 64, c_or, 8, asm_or)
#endif

ATOMIC_OPS(        , .aqrl)
ATOMIC_OPS(_acquire,   .aq)
ATOMIC_OPS(_release,   .rl)
ATOMIC_OPS(_relaxed,      )

#undef ATOMIC_OPS
#undef ATOMIC_OP
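
/*
 * Usage sketch (illustrative only, not from the original file): a lock-free
 * "increment if below a limit" built on the fully ordered atomic_cmpxchg()
 * generated above.  inc_below() is a hypothetical helper.
 *
 *	static bool inc_below(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < limit) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				return true;
 *			old = seen;
 *		}
 *		return false;
 *	}
 */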

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl  %[p],  %[c]\n\t"
		"sub       %[rc],  %[p], %[o]\n\t"
		"bltz      %[rc],    1f\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc],    0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
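
/*
 * Usage sketch (illustrative only): atomic_dec_if_positive() returns the value
 * the counter would have after the decrement, and only performs the decrement
 * when that value is not negative, so a "try-acquire" on a counted resource
 * can be written as (permits is an example name):
 *
 *	if (atomic_dec_if_positive(&permits) < 0)
 *		return -EAGAIN;		// nothing left; counter unchanged
 */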

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl  %[p],  %[c]\n\t"
		"sub       %[rc],  %[p], %[o]\n\t"
		"bltz      %[rc],    1f\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc],    0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */