/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word breaking (torn reads and writes)
 * when using a 64-bit atomic variable on a 32-bit arch.  The Solaris
 * API would need to add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined, all atomic operations are
 * serialized through global spin locks.  This is bad for performance,
 * but it does allow a simple, generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined, the native Linux atomic
 * operations are used.  This is safe as long as the core Linux
 * implementation doesn't change, because we rely on the fact that an
 * atomic type is really just a uint32 or uint64.  If this changes at
 * some point in the future, we will need to fall back to the spinlock
 * approach.
 */
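
/*
 * Example usage (an illustrative sketch, not part of this header): a
 * minimal reference count built on the primitives declared below.  The
 * names obj_refcount, obj_hold(), and obj_rele() are hypothetical.
 *
 *	static uint64_t obj_refcount = 1;
 *
 *	static void
 *	obj_hold(void)
 *	{
 *		atomic_inc_64(&obj_refcount);
 *	}
 *
 *	static int
 *	obj_rele(void)
 *	{
 *		return (atomic_dec_64_nv(&obj_refcount) == 0);
 *	}
 *
 * Because atomic_dec_64_nv() returns the new value, obj_rele() returns
 * non-zero only for the caller that drops the last reference.
 */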
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)++;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)--;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target += delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target -= delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = ++(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = --(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
    uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target += delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target -= delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = ++(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = --(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

#else /* ATOMIC_SPINLOCK */

#define	atomic_inc_32(v)	atomic_inc((atomic_t *)(v))
#define	atomic_dec_32(v)	atomic_dec((atomic_t *)(v))
#define	atomic_add_32(v, i)	atomic_add((i), (atomic_t *)(v))
#define	atomic_sub_32(v, i)	atomic_sub((i), (atomic_t *)(v))
#define	atomic_inc_32_nv(v)	atomic_inc_return((atomic_t *)(v))
#define	atomic_dec_32_nv(v)	atomic_dec_return((atomic_t *)(v))
#define	atomic_add_32_nv(v, i)	atomic_add_return((i), (atomic_t *)(v))
#define	atomic_sub_32_nv(v, i)	atomic_sub_return((i), (atomic_t *)(v))
#define	atomic_cas_32(v, x, y)	atomic_cmpxchg((atomic_t *)(v), x, y)
#define	atomic_swap_32(v, x)	atomic_xchg((atomic_t *)(v), x)
#define	atomic_inc_64(v)	atomic64_inc((atomic64_t *)(v))
#define	atomic_dec_64(v)	atomic64_dec((atomic64_t *)(v))
#define	atomic_add_64(v, i)	atomic64_add((i), (atomic64_t *)(v))
#define	atomic_sub_64(v, i)	atomic64_sub((i), (atomic64_t *)(v))
#define	atomic_inc_64_nv(v)	atomic64_inc_return((atomic64_t *)(v))
#define	atomic_dec_64_nv(v)	atomic64_dec_return((atomic64_t *)(v))
#define	atomic_add_64_nv(v, i)	atomic64_add_return((i), (atomic64_t *)(v))
#define	atomic_sub_64_nv(v, i)	atomic64_sub_return((i), (atomic64_t *)(v))
#define	atomic_cas_64(v, x, y)	atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define	atomic_swap_64(v, x)	atomic64_xchg((atomic64_t *)(v), x)

#endif /* ATOMIC_SPINLOCK */
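
/*
 * Example (an illustrative sketch, not part of this header): with
 * either implementation, atomic_cas_64() returns the value *target
 * held before the operation, so a lock-free read-modify-write loop
 * retries until the returned value matches the value it read.  The
 * helper name atomic_max_64() is hypothetical.
 *
 *	static void
 *	atomic_max_64(volatile uint64_t *target, uint64_t value)
 *	{
 *		uint64_t old;
 *
 *		do {
 *			old = *target;
 *			if (old >= value)
 *				break;
 *		} while (atomic_cas_64(target, old, value) != old);
 *	}
 *
 * Note that the raw read of *target is subject to the word-breaking
 * caveat described at the top of this file on 32-bit systems.
 */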

#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_64((volatile uint64_t *)target,
	    (uint64_t)cmp, (uint64_t)newval);
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_32((volatile uint32_t *)target,
	    (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */
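
/*
 * Example (an illustrative sketch, not part of this header):
 * atomic_cas_ptr() maps to the pointer-sized compare-and-swap, which
 * is enough to build simple lock-free structures such as a singly
 * linked push list.  The names node_t, list_head, and list_push() are
 * hypothetical.
 *
 *	typedef struct node { struct node *next; } node_t;
 *	static node_t *list_head;
 *
 *	static void
 *	list_push(node_t *n)
 *	{
 *		node_t *old;
 *
 *		do {
 *			old = list_head;
 *			n->next = old;
 *		} while (atomic_cas_ptr(&list_head, old, n) != old);
 *	}
 */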

#endif /* _SPL_ATOMIC_H */