#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/spinlock.h>

/* XXX: Serialize everything through global locks.  This is
 * going to be bad for performance, but for now it's the easiest
 * way to ensure correct behavior.  I don't like it at all.
 * It would be nicer to map these functions onto the native Linux
 * atomic operations, but the use of the plain uint64_t type
 * complicates this.
 */
extern spinlock_t atomic64_lock;
extern spinlock_t atomic32_lock;
extern spinlock_t atomic_lock;

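/*
 * The three locks above must be defined exactly once in a source
 * file linked into the module.  A minimal sketch of what that
 * definition could look like (hypothetical companion file, not part
 * of this header; on newer kernels DEFINE_SPINLOCK() replaces the
 * SPIN_LOCK_UNLOCKED initializer):
 *
 *	spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
 *	spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;
 *	spinlock_t atomic_lock   = SPIN_LOCK_UNLOCKED;
 *	EXPORT_SYMBOL(atomic64_lock);
 *	EXPORT_SYMBOL(atomic32_lock);
 *	EXPORT_SYMBOL(atomic_lock);
 */
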
static __inline__ uint32_t
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target += delta;
	spin_unlock(&atomic32_lock);

	return rc;
}

static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

static __inline__ uint64_t
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target += delta;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target -= delta;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	*target += delta;
	/* Capture the new value while the lock is still held;
	 * reading *target after the unlock would race with
	 * concurrent updates. */
	rc = *target;
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	*target -= delta;
	rc = *target;	/* new value, read under the lock */
	spin_unlock(&atomic64_lock);

	return rc;
}

static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (rc == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}

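/*
 * Illustrative sketch (not part of the Solaris atomic API): richer
 * read-modify-write operations are normally built on atomic_cas_64()
 * with a retry loop.  This hypothetical helper stores 'value' in
 * *target only if it exceeds the current contents.
 */
static __inline__ void
atomic_max_64_example(volatile uint64_t *target, uint64_t value)
{
	uint64_t old;

	do {
		old = *target;
		if (old >= value)
			return;
		/* Retry if another thread changed *target meanwhile. */
	} while (atomic_cas_64(target, old, value) != old);
}
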
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	void *rc;

	spin_lock(&atomic_lock);
	/* 'target' points at the pointer to be updated and must be
	 * dereferenced; assigning to 'target' itself would only
	 * modify the local parameter copy. */
	rc = *(void **)target;
	if (rc == cmp)
		*(void **)target = newval;
	spin_unlock(&atomic_lock);

	return rc;
}

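/*
 * Illustrative sketch: atomic_cas_ptr() is commonly used to publish
 * a pointer exactly once.  This hypothetical helper installs 'ptr'
 * in *slot if the slot is still NULL and returns whichever pointer
 * ends up published.
 */
static __inline__ void *
atomic_install_ptr_example(void * volatile *slot, void *ptr)
{
	void *old;

	old = atomic_cas_ptr((volatile void *)slot, NULL, ptr);
	return (old == NULL) ? ptr : old;
}
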
#ifdef __cplusplus
}
#endif

#endif /* _SPL_ATOMIC_H */