#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
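
/*
 * Illustrative sketch (not part of the upstream header): how a caller
 * might initialize, set, and read an atomic_t. The variable name
 * "active_requests" is hypothetical.
 *
 *	static atomic_t active_requests = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_requests, 16);	// plain atomic store
 *	int n = atomic_read(&active_requests);	// plain atomic load, n == 16
 *
 * Note that atomic_read()/atomic_set() only guarantee the access is
 * not torn; they imply no memory barriers.
 */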

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}
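
/*
 * Illustrative sketch (not part of the upstream header): releasing a
 * batch of references and tearing the object down when the count hits
 * zero. "obj", "refs" and release_obj() are hypothetical.
 *
 *	if (atomic_sub_and_test(batch, &obj->refs))
 *		release_obj(obj);	// we dropped the last reference
 */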

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}
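
/*
 * Illustrative sketch (not part of the upstream header): the classic
 * get/put reference-counting idiom built from atomic_inc() and
 * atomic_dec_and_test(). "struct foo" and free_foo() are hypothetical.
 *
 *	void foo_get(struct foo *f)
 *	{
 *		atomic_inc(&f->refcnt);
 *	}
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			free_foo(f);	// last reference is gone
 *	}
 */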

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}
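
/*
 * Illustrative sketch (not part of the upstream header): charging
 * against a signed credit counter and detecting overdraw in the same
 * atomic step. "credits", "cost" and throttle() are hypothetical.
 *
 *	if (atomic_add_negative(-cost, &credits))
 *		throttle();	// balance went below zero
 */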

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

/**
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))
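
/*
 * Illustrative sketch (not part of the upstream header): handing out
 * monotonically increasing IDs with atomic_inc_return(), which returns
 * the value *after* the increment. "next_id" is hypothetical.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int alloc_id(void)
 *	{
 *		return atomic_inc_return(&next_id);	// 1, 2, 3, ...
 *	}
 */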

/*
 * atomic_fetch_add()/atomic_fetch_sub() atomically add/subtract @i and
 * return the value @v held *before* the operation.
 */
static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}

/*
 * atomic_cmpxchg() atomically replaces @v with @new if @v equals @old,
 * and returns the value @v held beforehand.
 */
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

/*
 * atomic_try_cmpxchg() returns true on success; on failure it updates
 * *@old with the value actually found, which makes it convenient in
 * retry loops.
 */
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}
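
/*
 * Illustrative sketch (not part of the upstream header): a lock-free
 * "record the maximum" update using the atomic_try_cmpxchg() retry
 * idiom; on failure, "cur" is refreshed automatically. "max_seen" and
 * "new_val" are hypothetical.
 *
 *	int cur = atomic_read(&max_seen);
 *	do {
 *		if (cur >= new_val)
 *			break;		// nothing to update
 *	} while (!atomic_try_cmpxchg(&max_seen, &cur, new_val));
 */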

/*
 * atomic_xchg() unconditionally stores @new in @v and returns the
 * previous value.
 */
static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}
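
/*
 * Illustrative sketch (not part of the upstream header): draining a
 * pending-work counter in one shot with atomic_xchg(). "pending" and
 * process() are hypothetical.
 *
 *	int work = atomic_xchg(&pending, 0);	// claim everything queued
 *	process(work);
 */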

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
			: "+m" (v->counter)				\
			: "ir" (i)					\
			: "memory");					\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int val = atomic_read(v);					\
	do {								\
	} while (!atomic_try_cmpxchg(v, &val, val c_op i));		\
	return val;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op)							\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &)
ATOMIC_OPS(or , |)
ATOMIC_OPS(xor, ^)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
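
/*
 * Illustrative sketch (not part of the upstream header): setting a
 * flag bit and learning in the same step whether it was already set,
 * via the atomic_fetch_or() generated above. "state" and FLAG_SHUTDOWN
 * are hypothetical.
 *
 *	if (atomic_fetch_or(FLAG_SHUTDOWN, &state) & FLAG_SHUTDOWN)
 *		return;		// someone else already initiated shutdown
 */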

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);
	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));
	return c;
}
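
/*
 * Illustrative sketch (not part of the upstream header): the
 * "increment unless zero" idiom, taking a reference only while an
 * object is still live; a returned old value of 0 means nothing was
 * added. "obj" is hypothetical.
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// object already on its way out
 */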

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */