/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");

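/*
 * A rough sketch of what the two helpers above expand to, assuming the
 * RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER strings provided by
 * <asm/fence.h>:
 *
 *	__asm__ __volatile__("\tfence r , rw\n" ::: "memory");	// acquire
 *	__asm__ __volatile__("\tfence rw,  w\n" ::: "memory");	// release
 */
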
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}									\

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS

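/*
 * Illustrative expansion (a sketch, not additional API): with the 32-bit
 * ATOMIC_OP above, ATOMIC_OPS(add, add, i) generates roughly the following
 * function, a single AMO instruction with no ordering bits:
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 */
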
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,		\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,		\
					      atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_sub_return		arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

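/*
 * Illustrative expansion (a sketch): for the 32-bit add case,
 * ATOMIC_OPS(add, add, +, i) above produces a fully ordered AMO that
 * returns the old value, plus a _return variant that derives the new
 * value from it:
 *
 *	static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ (
 *			"	amoadd.w.aqrl  %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret)
 *			: "r" (i)
 *			: "memory");
 *		return ret;
 *	}
 *
 *	static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 *	{
 *		return arch_atomic_fetch_add(i, v) + i;
 *	}
 */
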
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
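
/*
 * Usage sketch (illustrative; obj and refcount are made-up names): the
 * generic atomic_add_unless() and atomic_inc_not_zero() wrappers are the
 * usual consumers of this primitive, e.g. taking a reference only while
 * an object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// refcount already hit zero; object is dying
 *
 * The trailing "fence rw, rw" is what makes the successful LR/SC path a
 * full barrier, as the comment before the function requires.
 */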

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)	\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)	\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)	\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,	\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,	\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,	\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)						\
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_xchg_acquire	arch_atomic_xchg_acquire
#define arch_atomic_xchg_release	arch_atomic_xchg_release
#define arch_atomic_xchg		arch_atomic_xchg
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg_release	arch_atomic_cmpxchg_release
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

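/*
 * Usage sketch (illustrative; v, old and prev are made-up names): the fully
 * ordered arch_atomic_cmpxchg() backs the generic atomic_cmpxchg(), which is
 * the building block of the classic read-modify-write retry loop:
 *
 *	int old = atomic_read(&v), prev;
 *
 *	for (;;) {
 *		prev = atomic_cmpxchg(&v, old, old * 2);
 *		if (prev == old)
 *			break;		// our update won the race
 *		old = prev;		// lost; retry against the new value
 *	}
 */
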
static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	sub       %[rc], %[p], %[o]\n"
		"	bltz      %[rc], 1f\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(v, 1)
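
/*
 * Usage sketch (illustrative; pool and available are made-up names):
 * atomic_dec_if_positive() only stores the decremented value when the
 * result stays non-negative, so a negative return means nothing was
 * consumed and the counter is unchanged:
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// no permits left; counter left untouched
 */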

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	sub       %[rc], %[p], %[o]\n"
		"	bltz      %[rc], 1f\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */