/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL. If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

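/*
 * Compatibility shims: when the kernel does not already provide
 * atomic64_cmpxchg()/atomic64_xchg() (the HAVE_ATOMIC64_* symbols are
 * presumably set by the SPL configure checks), implement them in terms
 * of the generic cmpxchg()/xchg() applied to the atomic64_t counter field.
 */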
#ifndef HAVE_ATOMIC64_CMPXCHG
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#endif

#ifndef HAVE_ATOMIC64_XCHG
#define atomic64_xchg(v, n) (xchg(&((v)->counter), n))
#endif

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word tearing when a 64-bit atomic
 * variable is used on a 32-bit arch.  The Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined, all atomic operations are
 * serialized through global spin locks.  This is bad for performance
 * but it does allow a simple, generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined, the Linux atomic operations
 * are used directly.  This is safe as long as the core Linux
 * implementation doesn't change, because we rely on the fact that an
 * atomic type is really just a uint32 or uint64.  If this changes at
 * some point in the future we need to fall back to the spinlock
 * approach.
 */
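
/*
 * Illustrative usage sketch only (not part of the upstream header;
 * refcount, state, oldval, newval, cleanup() and proceed() are
 * hypothetical caller-side names):
 *
 *	volatile uint64_t refcount = 0;
 *
 *	atomic_inc_64(&refcount);
 *	if (atomic_dec_64_nv(&refcount) == 0)
 *		cleanup();			// last reference dropped
 *
 *	volatile uint32_t state = oldval;
 *	if (atomic_cas_32(&state, oldval, newval) == oldval)
 *		proceed();			// the swap took effect
 */
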
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)++;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)--;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target += delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target -= delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = ++(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = --(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
    uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target += delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target -= delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = ++(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = --(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

#else /* ATOMIC_SPINLOCK */

#define atomic_inc_32(v)	atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)	atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)	atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)	atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)	atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)	atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)	atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)	atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)	atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_swap_32(v, x)	atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v)	atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)	atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)	atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)	atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)	atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)	atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)	atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)	atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)	atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define atomic_swap_64(v, x)	atomic64_xchg((atomic64_t *)(v), x)

#endif /* ATOMIC_SPINLOCK */

#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_64((volatile uint64_t *)target,
	    (uint64_t)cmp, (uint64_t)newval);
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_32((volatile uint32_t *)target,
	    (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */
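
/*
 * Illustrative sketch of atomic_cas_ptr() for lock-free pointer
 * publication (head, node, and alloc_node() are hypothetical
 * caller-side names, not part of the upstream header):
 *
 *	void *volatile head = NULL;
 *	void *node = alloc_node();
 *
 *	if (atomic_cas_ptr(&head, NULL, node) == NULL)
 *		// node is now the published head
 */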

#endif /* _SPL_ATOMIC_H */