include/sys/atomic.h
/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word breaking when using a 64-bit
 * atomic variable on a 32-bit arch.  The Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined, all atomic operations are
 * serialized through global spin locks.  This is bad for performance,
 * but it allows a simple, generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined, the Linux atomic operations
 * are used.  This is safe as long as the core Linux implementation
 * doesn't change, because we are relying on the fact that an atomic
 * type is really just a uint32 or uint64.  If this ever changes, we
 * will need to fall back to the spinlock approach.
 */
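
/*
 * Example (illustrative only): callers use the Solaris-style accessors
 * provided below.  The counter and helper names in this sketch are
 * hypothetical and not part of this header.
 *
 *     static volatile uint64_t stat_count = 0;
 *
 *     static void
 *     stat_bump(void)
 *     {
 *         atomic_inc_64(&stat_count);
 *     }
 *
 *     static uint64_t
 *     stat_reset(void)
 *     {
 *         return (atomic_swap_64(&stat_count, 0));
 *     }
 *
 * As noted above, a plain read of stat_count on a 32-bit arch may be
 * torn; only accesses made through the atomic_*() functions are
 * serialized.
 */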
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;
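
/*
 * These two locks are only declared here; a single SPL source file is
 * expected to provide the definitions.  A sketch using the standard
 * kernel macro would look like:
 *
 *     DEFINE_SPINLOCK(atomic32_lock);
 *     DEFINE_SPINLOCK(atomic64_lock);
 */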

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)++;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)--;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target += delta;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target -= delta;
        spin_unlock(&atomic32_lock);
}

static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = ++(*target);
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = --(*target);
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;
        spin_unlock(&atomic32_lock);

        return (rc);
}
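
/*
 * Example (illustrative only): atomic_cas_32() returns the value found
 * in *target before any exchange, so the conventional retry loop
 * compares that return value against the expected old value.  The
 * helper below is hypothetical.
 *
 *     static void
 *     bounded_inc_32(volatile uint32_t *counter, uint32_t limit)
 *     {
 *         uint32_t oldval, newval;
 *
 *         do {
 *             oldval = *counter;
 *             if (oldval >= limit)
 *                 return;
 *             newval = oldval + 1;
 *         } while (atomic_cas_32(counter, oldval, newval) != oldval);
 *     }
 */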

static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        *target = newval;
        spin_unlock(&atomic32_lock);

        return (rc);
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)++;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)--;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target += delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target -= delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = ++(*target);
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = --(*target);
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;
        spin_unlock(&atomic64_lock);

        return (rc);
}

static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        *target = newval;
        spin_unlock(&atomic64_lock);

        return (rc);
}

#else /* ATOMIC_SPINLOCK */

#define atomic_inc_32(v)        atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)        atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)     atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)     atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)     atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)     atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)  atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)  atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)  atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_swap_32(v, x)    atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v)        atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)        atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)     atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)     atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)     atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)     atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)  atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)  atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)  atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define atomic_swap_64(v, x)    atomic64_xchg((atomic64_t *)(v), x)

#endif /* ATOMIC_SPINLOCK */
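
/*
 * When ATOMIC_SPINLOCK is not defined, the macros above assume that
 * atomic_t and atomic64_t carry nothing beyond a 32-bit and a 64-bit
 * counter, respectively (see the comment at the top of this file).  A
 * compile-time check along the following lines could catch a future
 * layout change; this is only a sketch and is not wired into the
 * header:
 *
 *     BUILD_BUG_ON(sizeof (atomic_t) != sizeof (uint32_t));
 *     BUILD_BUG_ON(sizeof (atomic64_t) != sizeof (uint64_t));
 *
 * BUILD_BUG_ON() is the standard kernel macro and must appear in
 * function scope.
 */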

#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return ((void *)atomic_cas_64((volatile uint64_t *)target,
            (uint64_t)cmp, (uint64_t)newval));
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return ((void *)atomic_cas_32((volatile uint32_t *)target,
            (uint32_t)cmp, (uint32_t)newval));
}
#endif /* _LP64 */
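
/*
 * Example (illustrative only): atomic_cas_ptr() is typically used to
 * publish a pointer without taking a lock, for instance pushing onto a
 * singly linked list.  The node type and helper below are hypothetical,
 * and the sketch ignores ABA concerns for brevity.
 *
 *     struct node {
 *         struct node *next;
 *     };
 *
 *     static void
 *     push(struct node *volatile *head, struct node *n)
 *     {
 *         struct node *oldhead;
 *
 *         do {
 *             oldhead = *head;
 *             n->next = oldhead;
 *         } while (atomic_cas_ptr(head, oldhead, n) != oldhead);
 *     }
 */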

#endif /* _SPL_ATOMIC_H */