/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

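/*
 * ARM64_LSE_ATOMIC_INSN(llsc, lse) expands to an ALTERNATIVE() between a
 * branch to the out-of-line LL/SC implementation (via __LL_SC_CALL) and the
 * inline LSE instruction sequence; the kernel patches in the LSE form at
 * boot on CPUs that implement the ARMv8.1 atomics. The ops below use the
 * store-only forms (STCLR/STSET/STEOR/STADD), which update memory without
 * returning the old value.
 */
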
static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	"	steor	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

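/*
 * The *_return variants need a result: LDADD places the previous value of
 * the counter in w30 (hence the "x30" clobber) and an ADD then produces the
 * updated value to return. The #mb parameter selects the ordering suffix
 * (none, "a", "l" or "al" for relaxed/acquire/release/full), and the ordered
 * variants also clobber "memory" so the compiler cannot move other accesses
 * across them.
 */
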
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: "x30" , ##cl);						\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

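/*
 * There is no store-AND or store-SUB in LSE: atomic_and() inverts the mask
 * and uses STCLR, and atomic_sub() negates the operand and uses STADD. The
 * extra MVN/NEG instruction is why these use the two-part
 * ARM64_LSE_ATOMIC_INSN() form with a padding nop on the LL/SC side, keeping
 * both alternative sequences the same length.
 */
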
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: "x30" , ##cl);						\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

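/* The atomic64_t ops below mirror the 32-bit ones, using full x registers. */
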
static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

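/*
 * There is no LSE instruction for dec-if-positive, so the LSE path below is
 * a CASAL retry loop: load the counter, give up if the decrement would go
 * negative, otherwise try to swap in the decremented value and retry if
 * another CPU got there first. SUBS sets the condition flags (hence the
 * "cc" clobber) and x30 is used as scratch on both paths.
 */
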
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: "x30", "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

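/*
 * __CMPXCHG_CASE() generates one cmpxchg helper per access size and memory
 * ordering: "w"/"x" selects the register width, "sz" the CAS size suffix
 * (b, h or none) and "mb" the ordering suffix. The LSE path moves the
 * expected value into (w|x)30, issues the CAS and returns the value that
 * was actually observed in memory.
 */
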
#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_CMPXCHG(name)						\
	"	nop",							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

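/*
 * Double-word compare-and-swap. The LSE path is a single CASP on the
 * {old1, old2} / {new1, new2} register pairs; the EOR/ORR sequence folds
 * the two observed words into one value, so the helper returns zero when
 * both words matched and the exchange succeeded, matching the LL/SC
 * fallback.
 */
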
#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline int __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	"	nop\n"							\
	"	nop\n"							\
	__LL_SC_CMPXCHG_DBL(name),					\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */