/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

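/* Worked example (illustrative, not part of the original header): assuming
 * L1_CACHE_BYTES == 32, an atomic_t at address 0x1040 hashes to
 * (0x1040 / 32) & (ATOMIC_HASH_SIZE - 1) == 0x82 & 3 == 2, so it is guarded
 * by __atomic_hash[2].  atomic_ts in the same cacheline always share a lock;
 * unrelated ones are spread across the four locks.
 */
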
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

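/* Illustrative pattern (this mirrors how the atomic_* helpers below use the
 * pair; it is not additional API): hash the object's address to one of the
 * spinlocks, disable local interrupts, take the lock, do the read-modify-write,
 * then undo both in reverse order.
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */
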
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

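/* Illustrative consequence (assuming the usual toolchain behaviour): __xchg()
 * below has no "case 2:", so
 *
 *	short s = 0;
 *	xchg(&s, 1);
 *
 * leaves an unresolved call to __xchg_called_with_bad_pointer() and the build
 * fails at link time instead of silently touching adjacent bytes.
 */
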
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
**	if (((unsigned long)p & 0xf) == 0)
**		return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

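/* Example usage (illustrative; "flag" is a hypothetical variable, not part of
 * this header): the __typeof__ cast makes xchg() return the old value with the
 * pointee's type, while sizeof() selects the right helper at compile time.
 *
 *	int flag = 1;
 *	int old = xchg(&flag, 0);
 *
 * afterwards flag is 0 and old is 1.
 */
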

#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

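/* Example usage (illustrative; "count" is a hypothetical variable, not part of
 * this header): cmpxchg() stores the new value only if the current value still
 * equals the expected one, and always returns what was there beforehand, so the
 * caller can tell whether the swap happened.
 *
 *	int count = 3;
 *	int seen = cmpxchg(&count, 3, 7);	-> count == 7, seen == 3
 *	seen     = cmpxchg(&count, 3, 9);	-> count stays 7, seen == 7
 */
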
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (atomic_read is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)					\
({									\
	__typeof__((v)->counter) c, old;				\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

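/* Example usage (illustrative; "obj" and its "refs" field are hypothetical,
 * not part of this header): the cmpxchg loop above retries until it either
 * observes @u or swaps in c + @a, which is the usual "take a reference only
 * while the object is still live" idiom.
 *
 *	if (atomic_inc_not_zero(&obj->refs)) {
 *		... use obj, it cannot be freed under us ...
 *		atomic_dec(&obj->refs);
 *	}
 */
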
#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)(i)),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)(i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)(i)),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)(i)),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

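/* Example usage (illustrative; "obj", its "refs" field and free_object() are
 * hypothetical): atomic_dec_and_test() is the usual "drop a reference, free on
 * the final put" idiom.
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		free_object(obj);
 */
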
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic64_add_unless(v, a, u)					\
({									\
	__typeof__((v)->counter) c, old;				\
	c = atomic64_read(v);						\
	while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* CONFIG_64BIT */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */