#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */
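
/*
 * A minimal usage sketch (the object being counted and its
 * release_object() helper are hypothetical, not part of this header):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);
 *	if (atomic_dec_and_test(&refcnt))
 *		release_object();
 */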

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

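/*
 * Plain loads and stores are atomic on m68k; READ_ONCE()/WRITE_ONCE()
 * only keep the compiler from tearing, fusing or caching the access.
 */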
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif

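/*
 * ATOMIC_OP() generates a void atomic_<op>() that applies <asm_op>
 * straight to memory in one instruction; a single insn is atomic with
 * respect to interrupts, which is all UP m68k needs.
 */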
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

#ifdef CONFIG_RMW_INSNS

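/*
 * With cas available, atomic_<op>_return() is a load/modify/cas loop:
 * if the counter changed between the load and the cas, the cas fails
 * (reloading the new value) and we retry.
 */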
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}

#else

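/*
 * No cas instruction here: mask interrupts around a plain
 * read-modify-write instead, which is enough without SMP.
 */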
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

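/* ATOMIC_OPS() instantiates both the void and the value-returning forms. */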
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

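/*
 * addq/subq encode the immediate in the opcode itself, so a single
 * memory-direct instruction works on all parts, ColdFire included.
 */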
static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

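/*
 * m68k-specific: decrement and return true if the result is less than
 * zero (slt tests the signed "less than" condition after the subtract).
 */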
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

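/*
 * With cas available, use the cas-based cmpxchg()/xchg() from
 * <asm/cmpxchg.h>.
 */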
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

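/*
 * No cas instruction: emulate cmpxchg()/xchg() by disabling interrupts
 * around the compare and store, which is enough without SMP.
 */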
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

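/* Subtract i from v and return true if the result is zero. */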
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

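/* Add i to v and return true if the result is negative (smi tests N). */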
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

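/*
 * Atomically add a to v, unless v is u; return the old value of v.
 */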
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#endif /* __ARCH_M68K_ATOMIC__ */