/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
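
/*
 * Illustrative sketch (not part of this header; data, flag, compute()
 * and use() are hypothetical): a message-passing pattern built from
 * the two helpers above.
 *
 *	// producer
 *	WRITE_ONCE(data, compute());
 *	atomic_set_release(&flag, 1);	// orders the store to data before flag
 *
 *	// consumer
 *	if (atomic_read_acquire(&flag))	// orders the load of flag before data
 *		use(READ_ONCE(data));	// guaranteed to see the producer's store
 *
 * With _relaxed accesses the reads and writes would still be atomic,
 * but the consumer could observe flag == 1 while seeing stale data.
 */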

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	smp_mb__after_atomic(); \
	__ret; \
})

#define __atomic_op_release(op, args...) \
({ \
	smp_mb__before_atomic(); \
	op##_relaxed(args); \
})

#define __atomic_op_fence(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret; \
	smp_mb__before_atomic(); \
	__ret = op##_relaxed(args); \
	smp_mb__after_atomic(); \
	__ret; \
})
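
/*
 * For example, on an architecture that supplies only
 * atomic_add_return_relaxed(), the generated acquire variant
 *
 *	atomic_add_return_acquire(i, v)
 *
 * expands (roughly) to:
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 */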

/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed atomic_add_return
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...) \
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...) \
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...) \
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...) \
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...) \
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...) \
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...) \
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed cmpxchg64
#define cmpxchg64_acquire cmpxchg64
#define cmpxchg64_release cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...) \
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
#define xchg_release xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
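
/*
 * Illustrative sketch (not part of this header; busy, my_lock() and
 * my_unlock() are hypothetical): a minimal test-and-set lock built
 * from the variants above.
 *
 *	static atomic_t busy = ATOMIC_INIT(0);
 *
 *	static void my_lock(void)
 *	{
 *		while (atomic_xchg_acquire(&busy, 1))	// ACQUIRE on the winning swap
 *			cpu_relax();
 *	}
 *
 *	static void my_unlock(void)
 *	{
 *		atomic_set_release(&busy, 0);	// RELEASE ends the critical section
 *	}
 */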

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
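
/*
 * Example (illustrative, "stats" is hypothetical): saturate a counter
 * at INT_MAX instead of letting it wrap around.
 *
 *	atomic_add_unless(&stats, 1, INT_MAX);
 */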

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the
 * memory location before the atomic read/modify/write cycle, lowering
 * the number of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
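
/*
 * Example (illustrative, "obj->refcnt" is hypothetical): if most live
 * objects of a given type hold exactly one reference, seeding the
 * cmpxchg loop with 1 usually avoids the initial read.
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refcnt, 1))
 *		return NULL;	// refcount already hit zero
 */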

#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif
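
/*
 * Example (illustrative, "obj->users" is hypothetical): a get/put
 * scheme where teardown parks the counter at a negative value.
 *
 *	if (!atomic_inc_unless_negative(&obj->users))
 *		return -ENOENT;	// teardown in progress, don't hand out refs
 */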

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
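
/*
 * Example (illustrative, "sem->count" is hypothetical): a trylock on a
 * counting semaphore whose count must never go negative.
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EBUSY;	// no slots free, count left untouched
 */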

#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#endif /* _LINUX_ATOMIC_H */