include/sys/atomic.h
/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

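/*
 * Compatibility fallbacks: when the kernel does not provide
 * atomic64_cmpxchg()/atomic64_xchg(), express them in terms of the
 * generic cmpxchg()/xchg() on the atomic64_t counter member.
 */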
#ifndef HAVE_ATOMIC64_CMPXCHG
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#endif

#ifndef HAVE_ATOMIC64_XCHG
#define atomic64_xchg(v, n) (xchg(&((v)->counter), n))
#endif

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word breaking when a 64-bit atomic
 * variable is used on a 32-bit arch; the Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined, all atomic operations are
 * serialized through global spin locks.  This is bad for performance,
 * but it allows a simple, generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined, the native Linux atomic
 * operations are used.  This is safe only as long as the core Linux
 * implementation does not change, because it relies on an atomic type
 * being nothing more than a uint32 or uint64.  If that ever changes,
 * we must fall back to the spinlock approach.
 */
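/*
 * Usage sketch (hypothetical caller, not part of this header): a 64-bit
 * counter can be bumped with the plain form and read back atomically
 * with the _nv ("new value") form:
 *
 *     uint64_t refs;                        // hypothetical counter
 *     atomic_inc_64(&refs);                 // increment, no return value
 *     if (atomic_dec_64_nv(&refs) == 0)     // returns the updated value
 *             free_the_object();            // hypothetical cleanup
 */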
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)++;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)--;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target += delta;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target -= delta;
        spin_unlock(&atomic32_lock);
}

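/*
 * The _nv ("new value") variants return the value of the target after
 * the update has been applied.
 */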
static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = ++(*target);
        spin_unlock(&atomic32_lock);

        return nv;
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = --(*target);
        spin_unlock(&atomic32_lock);

        return nv;
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return nv;
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return nv;
}

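/*
 * Compare-and-swap: returns the value the target held before the call;
 * the store only happens when that value equals 'cmp', so callers can
 * check (return value == cmp) to see whether the exchange took place.
 */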
static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
    uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;

        spin_unlock(&atomic32_lock);

        return rc;
}

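/*
 * The 64-bit operations below mirror the 32-bit ones above, but
 * serialize on atomic64_lock instead of atomic32_lock.
 */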
static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)++;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)--;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target += delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target -= delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = ++(*target);
        spin_unlock(&atomic64_lock);

        return nv;
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = --(*target);
        spin_unlock(&atomic64_lock);

        return nv;
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return nv;
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return nv;
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;
        spin_unlock(&atomic64_lock);

        return rc;
}

#else /* ATOMIC_SPINLOCK */

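/*
 * Map the Solaris-style calls directly onto the Linux atomic primitives.
 * The casts assume an atomic_t/atomic64_t is layout-compatible with a
 * plain uint32_t/uint64_t counter, as noted in the comment above.
 */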
#define atomic_inc_32(v)        atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)        atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)     atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)     atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)     atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)     atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)  atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)  atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)  atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_inc_64(v)        atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)        atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)     atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)     atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)     atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)     atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)  atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)  atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)  atomic64_cmpxchg((atomic64_t *)(v), x, y)

#endif /* ATOMIC_SPINLOCK */

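/*
 * Pointer compare-and-swap is routed to the 64-bit or 32-bit CAS
 * depending on the native pointer width (_LP64).
 */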
#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return (void *)atomic_cas_64((volatile uint64_t *)target,
            (uint64_t)cmp, (uint64_t)newval);
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return (void *)atomic_cas_32((volatile uint32_t *)target,
            (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */

#endif /* _SPL_ATOMIC_H */