/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/
#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>
/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word tearing when using a 64-bit
 * atomic variable on a 32-bit arch.  The Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined all atomic operations will be
 * serialized through global spin locks.  This is bad for performance,
 * but it does allow a simple generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined the Linux atomic operations
 * are used.  This is safe as long as the core Linux implementation
 * doesn't change, because we are relying on the fact that an atomic
 * type is really just a uint32 or uint64.  If this changes at some
 * point in the future we need to fall back to the spin approach.
 */
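
/*
 * Illustrative usage (a sketch, not part of this header): whichever
 * backend below is compiled in, callers see the same Solaris-style
 * interface.  For example, a simple reference count, where the
 * release_object() hook is hypothetical:
 *
 *	volatile uint64_t refcnt = 0;
 *
 *	atomic_inc_64(&refcnt);
 *	if (atomic_dec_64_nv(&refcnt) == 0)
 *		release_object();
 */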
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)++;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)--;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target += delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target -= delta;
	spin_unlock(&atomic32_lock);
}

/* Increment and return the new value. */
static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = ++(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

/* Decrement and return the new value. */
static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = --(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

/* Add delta and return the new value. */
static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

/* Subtract delta and return the new value. */
static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

/* Set *target to newval iff it equals cmp; return the old value. */
static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

/* Unconditionally set *target to newval; return the old value. */
static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target += delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target -= delta;
	spin_unlock(&atomic64_lock);
}

/* Increment and return the new value. */
static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = ++(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

/* Decrement and return the new value. */
static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = --(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

/* Add delta and return the new value. */
static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

/* Subtract delta and return the new value. */
static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

/* Set *target to newval iff it equals cmp; return the old value. */
static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

/* Unconditionally set *target to newval; return the old value. */
static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

#else /* ATOMIC_SPINLOCK */

#define atomic_inc_32(v)	atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)	atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)	atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)	atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)	atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)	atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)	atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)	atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)	atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_swap_32(v, x)	atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v)	atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)	atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)	atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)	atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)	atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)	atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)	atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)	atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)	atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define atomic_swap_64(v, x)	atomic64_xchg((atomic64_t *)(v), x)

#endif /* ATOMIC_SPINLOCK */
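
/*
 * Illustrative usage (a sketch, not part of this header): atomic_cas_32()
 * returns the value observed at *target, so a lock-free read-modify-write
 * retries until the compare succeeds.  Given a hypothetical
 * volatile uint32_t *counter, this saturating increment stops at
 * UINT32_MAX:
 *
 *	uint32_t oldval, newval;
 *
 *	do {
 *		oldval = *counter;
 *		if (oldval == UINT32_MAX)
 *			break;
 *		newval = oldval + 1;
 *	} while (atomic_cas_32(counter, oldval, newval) != oldval);
 */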

#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_64((volatile uint64_t *)target,
	    (uint64_t)cmp, (uint64_t)newval);
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_32((volatile uint32_t *)target,
	    (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */
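
/*
 * Illustrative usage (a sketch, not part of this header): because
 * atomic_cas_ptr() dispatches to the CAS of the native pointer width,
 * it is sufficient for a lock-free stack push.  The node type, list
 * head, and push() helper below are hypothetical:
 *
 *	struct node { struct node *next; };
 *	static void *list_head;
 *
 *	static void
 *	push(struct node *n)
 *	{
 *		void *oldval;
 *
 *		do {
 *			oldval = list_head;
 *			n->next = oldval;
 *		} while (atomic_cas_ptr(&list_head, oldval, n) != oldval);
 *	}
 */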

#endif /* _SPL_ATOMIC_H */