git.proxmox.com Git - mirror_spl.git/blob - include/asm/atomic_compat.h
Correctly handle division on 32-bit RHEL5 systems by returning dividend.
[mirror_spl.git] / include / asm / atomic_compat.h
#ifndef _SPL_ATOMIC_COMPAT_H
#define _SPL_ATOMIC_COMPAT_H

#include <asm/atomic.h>
#include <spl_config.h>

/*
 * Compatibility shim: older kernels (e.g. 32-bit platforms before
 * atomic64 support landed) do not provide atomic64_t in <asm/atomic.h>.
 * HAVE_ATOMIC64_T is set by the configure checks in spl_config.h; when
 * it is absent we supply a spinlock-serialized 64-bit counter below.
 */
#ifndef HAVE_ATOMIC64_T
#include <linux/spinlock.h>
9
/*
 * Fallback atomic64_t: a plain 64-bit value whose every access is
 * serialized by an embedded spinlock.  Slower than a native atomic,
 * but correct on any kernel.
 */
typedef struct {
	spinlock_t lock;	/* protects all reads and writes of 'val' */
	__s64 val;		/* the 64-bit counter value */
} atomic64_t;

/* Static initializer: unlocked spinlock, counter preset to 'i'. */
#define ATOMIC64_INIT(i) { .lock = SPIN_LOCK_UNLOCKED, .val = (i) }
16
17 static inline void atomic64_add(__s64 i, atomic64_t *v)
18 {
19 unsigned long flags;
20
21 spin_lock_irqsave(&v->lock, flags);
22 v->val += i;
23 spin_unlock_irqrestore(&v->lock, flags);
24 }
25
26 static inline void atomic64_sub(__s64 i, atomic64_t *v)
27 {
28 unsigned long flags;
29
30 spin_lock_irqsave(&v->lock, flags);
31 v->val -= i;
32 spin_unlock_irqrestore(&v->lock, flags);
33 }
34
/* Increment / decrement by one; no value is returned. */
#define atomic64_inc(v) (atomic64_add(1, (v)))
#define atomic64_dec(v) (atomic64_sub(1, (v)))
37
38 static inline __s64 atomic64_add_return(__s64 i, atomic64_t *v)
39 {
40 unsigned long flags;
41 __s64 ret;
42
43 spin_lock_irqsave(&v->lock, flags);
44 v->val += i;
45 ret = v->val;
46 spin_unlock_irqrestore(&v->lock, flags);
47
48 return ret;
49 }
50
51 static inline __s64 atomic64_sub_return(__s64 i, atomic64_t *v)
52 {
53 unsigned long flags;
54 __s64 ret;
55
56 spin_lock_irqsave(&v->lock, flags);
57 v->val -= i;
58 ret = v->val;
59 spin_unlock_irqrestore(&v->lock, flags);
60
61 return ret;
62 }
63
/* Increment / decrement by one and return the resulting value. */
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
66
67 static inline __s64 atomic64_read(atomic64_t *v)
68 {
69 unsigned long flags;
70 __s64 r;
71
72 spin_lock_irqsave(&v->lock, flags);
73 r = v->val;
74 spin_unlock_irqrestore(&v->lock, flags);
75
76 return r;
77 }
78
79 static inline void atomic64_set(atomic64_t *v, __s64 i)
80 {
81 unsigned long flags;
82
83 spin_lock_irqsave(&v->lock, flags);
84 v->val = i;
85 spin_unlock_irqrestore(&v->lock, flags);
86 }
87
88 #endif /* HAVE_ATOMIC64_T */
89
/*
 * Fallbacks for kernels providing atomic64_t but not the cmpxchg/xchg
 * helpers: operate directly on the native atomic's 'counter' member.
 *
 * NOTE(review): '(v)->counter' is the member name of the kernel's native
 * atomic64_t.  These macros therefore assume HAVE_ATOMIC64_T is defined
 * whenever they are expanded; against the spinlock-based fallback above
 * (whose member is 'val') they would not compile.  Confirm the configure
 * checks guarantee that combination cannot occur.
 */
#ifndef HAVE_ATOMIC64_CMPXCHG
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#endif

#ifndef HAVE_ATOMIC64_XCHG
#define atomic64_xchg(v, n) (xchg(&((v)->counter), n))
#endif
97
98 #endif /* _SPL_ATOMIC_COMPAT_H */
99