/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *	Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/spr-regs.h>
#include <asm/system.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
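/*
 * Usage sketch (illustrative only; the counter name is made up): a simple
 * event counter built on the accessors above.  atomic_read()/atomic_set()
 * are plain volatile accesses with no implied ordering; the arithmetic
 * helpers further down are the read-modify-write operations.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);
 *	printk("%d events so far\n", atomic_read(&nr_events));
 */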
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
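/*
 * The inline versions below loop until the operation goes through
 * uninterrupted.  Roughly: ICC3.Z is set up front, the LD.P/ORCR pair opens
 * the critical sequence, and the CST.P store is conditional on CC3, which is
 * cleared if anything intervenes; CORCC clears ICC3.Z only when the store
 * actually happens, so the branch on ICC3.Z retries otherwise.  See
 * Documentation/frv/atomic-ops.txt for the authoritative description.
 */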
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	add%I2		%1,%2,%1		\n"
	    "	cst.p		%1,%M0		,cc3,#1	\n"
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq		icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	sub%I2		%1,%2,%1		\n"
	    "	cst.p		%1,%M0		,cc3,#1	\n"
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq		icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}
#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
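/*
 * Usage sketch (illustrative only; the struct and function names are made
 * up): the reference-counting pattern the header comment alludes to, where
 * atomic_dec_and_test() reports whether this caller dropped the last
 * reference.
 *
 *	struct widget {
 *		atomic_t refcount;
 *	};
 *
 *	static void widget_get(struct widget *w)
 *	{
 *		atomic_inc(&w->refcount);
 *	}
 *
 *	static void widget_put(struct widget *w)
 *	{
 *		if (atomic_dec_and_test(&w->refcount))
 *			kfree(w);
 *	}
 */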
typedef struct {
	volatile long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }
static inline long long atomic64_read(atomic64_t *v)
{
	long long counter;

	asm("ldd%I1 %M1,%0"
	    : "=e"(counter)
	    : "m"(v->counter));
	return counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile("std%I0	%1,%M0"
		     : "=m"(v->counter)
		     : "e"(i));
}
extern long long atomic64_inc_return(atomic64_t *v);
extern long long atomic64_dec_return(atomic64_t *v);
extern long long atomic64_add_return(long long i, atomic64_t *v);
extern long long atomic64_sub_return(long long i, atomic64_t *v);
static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	atomic64_sub_return(i, v);
}

static inline void atomic64_inc(atomic64_t *v)
{
	atomic64_inc_return(v);
}

static inline void atomic64_dec(atomic64_t *v)
{
	atomic64_dec_return(v);
}
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
/*****************************************************************************/
/*
 * exchange value with memory
 */
extern uint64_t __xchg_64(uint64_t i, volatile void *v);
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x)							\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
	switch (sizeof(__xg_orig)) {					\
	case 4:								\
		asm volatile("swap%I0 %M0,%1"				\
			     : "+m"(*__xg_ptr), "=r"(__xg_orig)		\
			     : "1"(x) : "memory");			\
		break;							\
	default:							\
		__xg_orig = (__typeof__(__xg_orig))0;			\
		asm volatile("break");					\
		break;							\
	}								\
	__xg_orig;							\
})
#else

extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x)								\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig;						\
	switch (sizeof(__xg_orig)) {						\
	case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
	default:								\
		__xg_orig = (__typeof__(__xg_orig))0;				\
		asm volatile("break");						\
		break;								\
	}									\
	__xg_orig;								\
})

#endif
#define tas(ptr) (xchg((ptr), 1))

#define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
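/*
 * Usage sketch (illustrative only; the flag and function names are made
 * up): atomic_xchg() returns the previous value, so it can hand exactly one
 * caller the right to perform a one-shot action.
 *
 *	static atomic_t shutdown_started = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&shutdown_started, 1) == 0)
 *		do_shutdown();
 */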
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
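/*
 * Usage sketch (illustrative only; widget_lookup() and the refcount field
 * are made up): atomic_inc_not_zero() is the usual way to take a reference
 * on an object found via a lookup, refusing objects whose count has already
 * dropped to zero and which are about to be freed.
 *
 *	struct widget *w = widget_lookup(id);
 *
 *	if (w && !atomic_inc_not_zero(&w->refcount))
 *		w = NULL;
 */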
#include <asm-generic/atomic-long.h>
#endif /* _ASM_ATOMIC_H */