/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API.  Neither
 * approach handles the issue of word breaking when using a 64-bit
 * atomic variable on a 32-bit arch.  The Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined all atomic operations will be
 * serialized through global spin locks.  This is bad for performance
 * but it does allow a simple generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined the Linux atomic operations
 * are used.  This is safe as long as the core Linux implementation
 * doesn't change, because we are relying on the fact that an atomic
 * type is really just a uint32 or uint64.  If this changes at some
 * point in the future we need to fall back to the spin approach.
 */
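
/*
 * Either way, callers use the same Solaris-style interface.  A minimal
 * usage sketch (illustrative only; the names below are hypothetical and
 * not part of this header):
 *
 *      static volatile uint64_t refcnt;
 *
 *      atomic_inc_64(&refcnt);
 *      if (atomic_dec_64_nv(&refcnt) == 0)
 *              free_resources();
 */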
5e9b5d83 49#ifdef ATOMIC_SPINLOCK
9f4c835a 50extern spinlock_t atomic32_lock;
5e9b5d83 51extern spinlock_t atomic64_lock;
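
/*
 * Note that these global locks are only declared here; exactly one
 * definition must exist elsewhere in the SPL, e.g. something along the
 * lines of DEFINE_SPINLOCK(atomic32_lock); in a .c file.
 */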

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)++;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
        spin_lock(&atomic32_lock);
        (*target)--;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target += delta;
        spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
        spin_lock(&atomic32_lock);
        *target -= delta;
        spin_unlock(&atomic32_lock);
}

static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = ++(*target);
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        nv = --(*target);
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
        uint32_t nv;

        spin_lock(&atomic32_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic32_lock);

        return (nv);
}

static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;

        spin_unlock(&atomic32_lock);

        return (rc);
}
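
/*
 * atomic_cas_32() returns the value *target held before the exchange,
 * so the swap succeeded exactly when the return value equals cmp.  A
 * minimal caller sketch (illustrative only; 'flag' and winner() are
 * hypothetical):
 *
 *      uint32_t old = atomic_cas_32(&flag, 0, 1);
 *      if (old == 0)
 *              winner();
 */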

static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        *target = newval;
        spin_unlock(&atomic32_lock);

        return (rc);
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)++;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)--;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target += delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
        spin_lock(&atomic64_lock);
        *target -= delta;
        spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = ++(*target);
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        nv = --(*target);
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target += delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t nv;

        spin_lock(&atomic64_lock);
        *target -= delta;
        nv = *target;
        spin_unlock(&atomic64_lock);

        return (nv);
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        if (*target == cmp)
                *target = newval;
        spin_unlock(&atomic64_lock);

        return (rc);
}

static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        *target = newval;
        spin_unlock(&atomic64_lock);

        return (rc);
}

#else /* ATOMIC_SPINLOCK */

#define atomic_inc_32(v)        atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)        atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)     atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)     atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)     atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)     atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)  atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)  atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)  atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_swap_32(v, x)    atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v)        atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)        atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)     atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)     atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)     atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)     atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)  atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)  atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)  atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define atomic_swap_64(v, x)    atomic64_xchg((atomic64_t *)(v), x)
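
/*
 * These wrappers rely on the kernel's atomic_t and atomic64_t being
 * plain 32-bit and 64-bit counters, as described in the comment at the
 * top of this file.  A build-time check along these lines (illustrative
 * only, placed in some function in a .c file) would catch a change in
 * that representation:
 *
 *      BUILD_BUG_ON(sizeof (atomic_t) != sizeof (uint32_t));
 *      BUILD_BUG_ON(sizeof (atomic64_t) != sizeof (uint64_t));
 */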

#endif /* ATOMIC_SPINLOCK */

#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return ((void *)atomic_cas_64((volatile uint64_t *)target,
            (uint64_t)cmp, (uint64_t)newval));
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return ((void *)atomic_cas_32((volatile uint32_t *)target,
            (uint32_t)cmp, (uint32_t)newval));
}
#endif /* _LP64 */
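
/*
 * Like the fixed-width variants, atomic_cas_ptr() returns the prior
 * value of *target, so the usual pattern is a retry loop.  A sketch
 * (illustrative only; 'head' and 'node' are hypothetical):
 *
 *      void *old;
 *
 *      do {
 *              old = head;
 *              node->next = old;
 *      } while (atomic_cas_ptr(&head, old, node) != old);
 */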

#endif /* _SPL_ATOMIC_H */