/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
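
/*
 * Illustrative sketch only (not part of the implementation): every
 * routine below is a load-exclusive/store-exclusive retry loop of
 * roughly this shape, shown here for a 32-bit add with the register
 * names chosen arbitrarily:
 *
 *	prfm	pstl1strm, [x1]		// prefetch the line for store
 * 1:	ldxr	w2, [x1]		// load-exclusive v->counter
 *	add	w2, w2, w0		// apply the operation
 *	stxr	w3, w2, [x1]		// w3 == 0 iff the store succeeded
 *	cbnz	w3, 1b			// exclusivity lost: retry
 *
 * The ld{a}xr/st{l}xr variants plus an optional trailing dmb provide
 * the relaxed/acquire/release/fully-ordered flavours generated below.
 */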
#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
	asm volatile("// atomic_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);
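
/*
 * Hypothetical caller of the functions ATOMIC_OP() generates (for
 * illustration only; the names nr_busy/enter/leave are made up):
 *
 *	static atomic_t nr_busy = ATOMIC_INIT(0);
 *
 *	void enter(void) { atomic_add(1, &nr_busy); }
 *	void leave(void) { atomic_sub(1, &nr_busy); }
 *
 * These void ops guarantee atomicity but impose no memory ordering;
 * the *_return and cmpxchg variants below are used when ordering
 * matters.
 */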
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return##name);
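
/*
 * Reading aid for the (name, mb, acq, rel, cl) parameters used here and
 * by the ATOMIC64_* and cmpxchg macros below:
 *
 *	name - suffix of the generated function ("", _relaxed, _acquire, _release)
 *	mb   - trailing barrier, "dmb ish" for the fully-ordered variant
 *	acq  - "a" turns ldxr into ldaxr (acquire semantics on the load)
 *	rel  - "l" turns stxr into stlxr (release semantics on the store)
 *	cl   - clobber list, "memory" whenever the op orders other accesses
 *
 * e.g. ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add) emits
 * atomic_add_return_acquire() built around ldaxr/stxr with no dmb.
 */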
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)
#define ATOMIC_OPS_RLX(...)						\
	ATOMIC_OPS(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)			\
	ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)	\
	ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)
ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
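
/*
 * Summary of what the invocations above provide: atomic_{add,sub,and,
 * andnot,or,xor}() plus atomic_{add,sub}_return{,_relaxed,_acquire,
 * _release}(), all implemented as LL/SC loops.
 */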
#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
	asm volatile("// atomic64_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
{									\
	long result;							\
	unsigned long tmp;						\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return##name);
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__)
#define ATOMIC64_OPS_RLX(...)						\
	ATOMIC64_OPS(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__)		\
	ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__)
ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)
#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
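
/*
 * atomic64_dec_if_positive() only stores back when the old value was
 * strictly positive; otherwise it returns the (negative) result of the
 * decrement without modifying the counter.  Hypothetical use, for
 * illustration only:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;		// nothing left to claim
 */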
#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)			\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);
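
/*
 * Shape of one instantiation, __cmpxchg_case_mb_4 (32-bit, fully
 * ordered), with the operand substitutions written out for clarity:
 *
 *	prfm	pstl1strm, %[v]
 * 1:	ldxr	%w[oldval], %[v]
 *	eor	%w[tmp], %w[oldval], %w[old]
 *	cbnz	%w[tmp], 2f		// mismatch: return loaded value
 *	stlxr	%w[tmp], %w[new], %[v]
 *	cbnz	%w[tmp], 1b		// exclusivity lost: retry
 *	dmb	ish
 *	mov	%w[oldval], %w[old]	// success path
 * 2:
 *
 * On success the loaded value necessarily equals 'old', so the final
 * mov does not change the result; the function returns 'old' directly.
 */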
__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);
__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL
#endif	/* __ASM_ATOMIC_LL_SC_H */