/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/types.h>

/*
 * Two approaches to atomic operations are implemented, each with its
 * own benefits and drawbacks imposed by the Solaris API. Neither
 * approach handles the issue of word breaking when using a 64-bit
 * atomic variable on a 32-bit arch. The Solaris API would need to
 * add an atomic read call to correctly support this.
 *
 * When ATOMIC_SPINLOCK is defined all atomic operations will be
 * serialized through global spin locks. This is bad for performance
 * but it does allow a simple generic implementation.
 *
 * When ATOMIC_SPINLOCK is not defined the Linux atomic operations
 * are used. This is safe as long as the core Linux implementation
 * doesn't change, because we are relying on the fact that an atomic
 * type is really just a uint32 or uint64. If this changes at some
 * point in the future we need to fall back to the spin approach.
 */
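/*
 * Usage sketch (illustrative only): a reference count shared between
 * threads, built on this Solaris-style API. The type and function
 * names below are hypothetical.
 *
 *	typedef struct my_ref {
 *		volatile uint64_t	ref_count;
 *	} my_ref_t;
 *
 *	static void
 *	my_ref_hold(my_ref_t *r)
 *	{
 *		atomic_inc_64(&r->ref_count);
 *	}
 *
 *	static int
 *	my_ref_release(my_ref_t *r)
 *	{
 *		return (atomic_dec_64_nv(&r->ref_count) == 0);
 *	}
 *
 * my_ref_release() returns 1 only for the thread that dropped the last
 * reference, which is the usual point to free the containing object.
 */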
#ifdef ATOMIC_SPINLOCK
extern spinlock_t atomic32_lock;
extern spinlock_t atomic64_lock;

static __inline__ void
atomic_inc_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)++;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_dec_32(volatile uint32_t *target)
{
	spin_lock(&atomic32_lock);
	(*target)--;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target += delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ void
atomic_sub_32(volatile uint32_t *target, int32_t delta)
{
	spin_lock(&atomic32_lock);
	*target -= delta;
	spin_unlock(&atomic32_lock);
}

static __inline__ uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = ++(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	nv = --(*target);
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
{
	uint32_t nv;

	spin_lock(&atomic32_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic32_lock);

	return nv;
}

static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
    uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;

	spin_unlock(&atomic32_lock);

	return rc;
}
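
/*
 * Note on the compare-and-swap calls above and below: atomic_cas_*()
 * always returns the value the target held before the call. A caller
 * checks for success by comparing that return value with 'cmp', e.g.
 * (hypothetical variable names):
 *
 *	uint32_t old = atomic_cas_32(&counter, expected, expected + 1);
 *	if (old == expected) {
 *		... the swap happened ...
 *	} else {
 *		... another thread changed 'counter'; retry using 'old' ...
 *	}
 */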

static __inline__ uint32_t
atomic_swap_32(volatile uint32_t *target, uint32_t newval)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target += delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	spin_lock(&atomic64_lock);
	*target -= delta;
	spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = ++(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	nv = --(*target);
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target += delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	spin_lock(&atomic64_lock);
	*target -= delta;
	nv = *target;
	spin_unlock(&atomic64_lock);

	return nv;
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_swap_64(volatile uint64_t *target, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

#else /* ATOMIC_SPINLOCK */
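
/*
 * In the default case the Solaris names map directly onto the Linux
 * atomic_t/atomic64_t primitives. The casts rely on atomic_t and
 * atomic64_t wrapping plain 32-bit and 64-bit counters, as described
 * in the comment at the top of this file.
 */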

#define atomic_inc_32(v)	atomic_inc((atomic_t *)(v))
#define atomic_dec_32(v)	atomic_dec((atomic_t *)(v))
#define atomic_add_32(v, i)	atomic_add((i), (atomic_t *)(v))
#define atomic_sub_32(v, i)	atomic_sub((i), (atomic_t *)(v))
#define atomic_inc_32_nv(v)	atomic_inc_return((atomic_t *)(v))
#define atomic_dec_32_nv(v)	atomic_dec_return((atomic_t *)(v))
#define atomic_add_32_nv(v, i)	atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i)	atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y)	atomic_cmpxchg((atomic_t *)(v), x, y)
#define atomic_swap_32(v, x)	atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v)	atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v)	atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i)	atomic64_add((i), (atomic64_t *)(v))
#define atomic_sub_64(v, i)	atomic64_sub((i), (atomic64_t *)(v))
#define atomic_inc_64_nv(v)	atomic64_inc_return((atomic64_t *)(v))
#define atomic_dec_64_nv(v)	atomic64_dec_return((atomic64_t *)(v))
#define atomic_add_64_nv(v, i)	atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i)	atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y)	atomic64_cmpxchg((atomic64_t *)(v), x, y)
#define atomic_swap_64(v, x)	atomic64_xchg((atomic64_t *)(v), x)

#endif /* ATOMIC_SPINLOCK */

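/*
 * atomic_cas_ptr() performs a compare-and-swap on a pointer-sized
 * value; it simply routes to the 64-bit or 32-bit CAS above depending
 * on whether the platform defines _LP64 (i.e. 64-bit pointers).
 */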
#ifdef _LP64
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_64((volatile uint64_t *)target,
	    (uint64_t)cmp, (uint64_t)newval);
}
#else /* _LP64 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_32((volatile uint32_t *)target,
	    (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */
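
/*
 * Usage sketch (illustrative only): one-time initialization of a shared
 * pointer using atomic_cas_ptr(). The names foo_t, foo_instance,
 * foo_alloc() and foo_free() are hypothetical.
 *
 *	static foo_t *foo_instance = NULL;
 *
 *	static foo_t *
 *	foo_get(void)
 *	{
 *		foo_t *foo = foo_instance;
 *
 *		if (foo == NULL) {
 *			foo = foo_alloc();
 *			if (atomic_cas_ptr(&foo_instance, NULL, foo) != NULL) {
 *				foo_free(foo);
 *				foo = foo_instance;
 *			}
 *		}
 *
 *		return foo;
 *	}
 *
 * atomic_cas_ptr() returns the previous value of the target, so a
 * non-NULL return means another thread installed its pointer first and
 * the locally allocated object must be released.
 */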

#endif /* _SPL_ATOMIC_H */