#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <asm/system.h>
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long)(a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
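
/* Illustrative sketch only (not part of this header): assuming, for the sake
 * of the example, L1_CACHE_BYTES == 32, the hash picks a lock based on which
 * cacheline the atomic_t lives in, so two counters in the same cacheline
 * share a lock while counters in different cachelines usually do not:
 *
 *	atomic_t a;	hypothetical object
 *	ATOMIC_HASH(&a) == &__atomic_hash[(((unsigned long)(&a)) / 32) & 3]
 */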

extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use _raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	_raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	spinlock_t *s = ATOMIC_HASH(l);			\
	_raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
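
/* Illustrative sketch only (not part of this header): the two macros above
 * must be used as a pair, on the same atomic_t address and the same flags
 * variable, bracketing the read-modify-write they protect:
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... modify v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * __atomic_add_return() below is the canonical user of this pattern.
 */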

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;


/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{

	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
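
/* Illustrative sketch only (not part of this header): xchg() atomically
 * stores a new value and hands back the previous contents.  A hypothetical
 * use, claiming a flag word exactly once:
 *
 *	static int claimed;		hypothetical variable
 *	if (xchg(&claimed, 1) == 0)
 *		... we were first; do the one-time work ...
 */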


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
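
/* Illustrative sketch only (not part of this header): cmpxchg() returns the
 * value that was actually found in *ptr, so the usual retry loop compares
 * that return value against the expected old value.  Hypothetical example,
 * incrementing an int counter but saturating it at some limit:
 *
 *	int old, new_;				p and limit are hypothetical
 *	do {
 *		old = *p;
 *		new_ = (old < limit) ? old + 1 : old;
 *	} while (cmpxchg(p, old, new_) != old);
 */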



/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)(i)),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)(i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)(i)),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)(i)),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))
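
/* Illustrative sketch only (not part of this header): the non-_return forms
 * discard the result, the _return forms hand back the post-operation value.
 * Hypothetical example:
 *
 *	atomic_t users = ATOMIC_INIT(0);	see ATOMIC_INIT below
 *	atomic_inc(&users);			now 1, result discarded
 *	if (atomic_add_return(3, &users) == 4)
 *		... observed the value after adding 3 ...
 */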

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
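
/* Illustrative sketch only (not part of this header): atomic_dec_and_test()
 * is the classic last-reference check -- only the caller that drops the
 * count to zero sees a true result, so only that caller frees the object:
 *
 *	if (atomic_dec_and_test(&obj->refcount))	hypothetical field
 *		free_the_object(obj);			hypothetical helper
 */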

#define ATOMIC_INIT(i)	{ (i) }
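
/* Illustrative sketch only (not part of this header): ATOMIC_INIT() is for
 * compile-time initialisation; atomic_set() is the run-time equivalent:
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);	hypothetical counter
 *	...
 *	atomic_set(&nr_widgets, 0);			reset at run time
 */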

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
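
/* Illustrative sketch only (not part of this header): these barrier macros
 * order surrounding memory accesses around an atomic_inc()/atomic_dec() that
 * does not itself return a value.  Hypothetical example, publishing data
 * before signalling it with a counter increment:
 *
 *	obj->data = value;			hypothetical store
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&obj->ready);		hypothetical counter
 */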

#endif /* _ASM_PARISC_ATOMIC_H_ */