#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

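/*
 * Illustrative usage sketch (not part of the original header): how a
 * counter built on these accessors is typically declared and touched.
 * The identifiers "example_count", "example_reset" and "example_peek"
 * are hypothetical.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	static void example_reset(void)
 *	{
 *		atomic_set(&example_count, 0);
 *	}
 *
 *	static int example_peek(void)
 *	{
 *		return atomic_read(&example_count);
 *	}
 */
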
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

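/*
 * The loop above (and the three loops that follow) is a standard
 * compare-and-swap retry: re-read the counter, compute the new value,
 * and retry whenever another CPU changed the counter in between.  A
 * portable sketch of the same idea, using the GCC builtin instead of
 * the ia64 intrinsic, would look roughly like:
 *
 *	int old, new;
 *	do {
 *		old = v->counter;
 *		new = old + i;
 *	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);
 */
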
static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

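/*
 * Illustrative sketch (not part of the original header): atomic_cmpxchg()
 * returns the value that was actually observed, so the caller can tell
 * whether its update won.  The helper name "example_claim" is hypothetical;
 * it sets a flag to 1 only if the flag is currently 0 and reports success.
 *
 *	static int example_claim(atomic_t *flag)
 *	{
 *		return atomic_cmpxchg(flag, 0, 1) == 0;
 *	}
 */
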
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

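/*
 * __atomic_add_unless() returns the value the counter held before the
 * attempted add, so the add happened iff that value differs from 'u'.
 * A sketch of the generic wrapper that turns this into a boolean (as
 * found in <linux/atomic.h> of this era; shown here for illustration):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */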

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

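/*
 * Illustrative sketch (not part of the original header): the classic use
 * of inc_not_zero is taking a new reference only while an object is still
 * live.  The type and helper names below are hypothetical.
 *
 *	static int example_get(struct example_obj *obj)
 *	{
 *		return atomic64_inc_not_zero(&obj->refcnt);
 *	}
 */
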
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

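/*
 * The constant checks above exist because the ia64 fetchadd instruction
 * only accepts the immediates -16, -8, -4, -1, 1, 4, 8 and 16.  For a
 * compile-time constant in that set, atomic_add_return() compiles down
 * to a single fetchadd; anything else falls back to the cmpxchg loop.
 * For example (hypothetical variable name):
 *
 *	atomic_add_return(4, &example_count);	(single fetchadd)
 *	atomic_add_return(3, &example_count);	(ia64_atomic_add() loop)
 */
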
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

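/*
 * Illustrative sketch (not part of the original header):
 * atomic_dec_and_test() is the usual building block for "free on last
 * put" reference counting.  The type and helper names are hypothetical.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);
 *	}
 */
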
#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

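/*
 * Because the atomic operations above are already serializing on ia64,
 * these hooks degenerate to a compiler barrier().  Portable code still
 * places them around atomic_inc()/atomic_dec() when it needs ordering,
 * roughly like this sketch (hypothetical field names):
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
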
#endif /* _ASM_IA64_ATOMIC_H */