#ifndef _ARCH_LOCAL_H
#define _ARCH_LOCAL_H

#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

static inline void local_inc(local_t *l)
{
	__asm__ __volatile__(
		_ASM_INC "%0"
		:"+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	__asm__ __volatile__(
		_ASM_DEC "%0"
		:"+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	__asm__ __volatile__(
		_ASM_ADD "%1,%0"
		:"+m" (l->a.counter)
		:"ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	__asm__ __volatile__(
		_ASM_SUB "%1,%0"
		:"+m" (l->a.counter)
		:"ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		_ASM_SUB "%2,%0; sete %1"
		:"+m" (l->a.counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		_ASM_DEC "%0; sete %1"
		:"+m" (l->a.counter), "=qm" (c)
		: : "memory");
	return c != 0;
}
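
/*
 * Usage sketch (hypothetical names, not part of this header): the
 * *_and_test helpers support refcount-style logic on CPU-local data.
 * With preemption already disabled:
 *
 *	static DEFINE_PER_CPU(local_t, nest_count);
 *
 *	local_inc(&__get_cpu_var(nest_count));		enter nested section
 *	...
 *	if (local_dec_and_test(&__get_cpu_var(nest_count)))
 *		;	outermost exit: counter returned to zero
 */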
87 | ||
88 | /** | |
89 | * local_inc_and_test - increment and test | |
90 | * @l: pointer to type local_t | |
91 | * | |
92 | * Atomically increments @l by 1 | |
93 | * and returns true if the result is zero, or false for all | |
94 | * other cases. | |
95 | */ | |
96 | static inline int local_inc_and_test(local_t *l) | |
97 | { | |
98 | unsigned char c; | |
99 | ||
100 | __asm__ __volatile__( | |
101 | _ASM_INC "%0; sete %1" | |
102 | :"+m" (l->a.counter), "=qm" (c) | |
103 | : : "memory"); | |
104 | return c != 0; | |
105 | } | |
106 | ||
107 | /** | |
108 | * local_add_negative - add and test if negative | |
109 | * @i: integer value to add | |
110 | * @l: pointer to type local_t | |
111 | * | |
112 | * Atomically adds @i to @l and returns true | |
113 | * if the result is negative, or false when | |
114 | * result is greater than or equal to zero. | |
115 | */ | |
116 | static inline int local_add_negative(long i, local_t *l) | |
117 | { | |
118 | unsigned char c; | |
119 | ||
120 | __asm__ __volatile__( | |
121 | _ASM_ADD "%2,%0; sets %1" | |
122 | :"+m" (l->a.counter), "=qm" (c) | |
123 | :"ir" (i) : "memory"); | |
124 | return c; | |
125 | } | |
126 | ||
127 | /** | |
128 | * local_add_return - add and return | |
129 | * @i: integer value to add | |
130 | * @l: pointer to type local_t | |
131 | * | |
132 | * Atomically adds @i to @l and returns @i + @l | |
133 | */ | |
134 | static inline long local_add_return(long i, local_t *l) | |
135 | { | |
136 | long __i; | |
137 | #ifdef CONFIG_M386 | |
138 | unsigned long flags; | |
01c57fb6 | 139 | if (unlikely(boot_cpu_data.x86 <= 3)) |
5638f993 | 140 | goto no_xadd; |
96a388de | 141 | #endif |
5638f993 HH |
142 | /* Modern 486+ processor */ |
143 | __i = i; | |
144 | __asm__ __volatile__( | |
145 | _ASM_XADD "%0, %1;" | |
146 | :"+r" (i), "+m" (l->a.counter) | |
147 | : : "memory"); | |
148 | return i + __i; | |
149 | ||
150 | #ifdef CONFIG_M386 | |
151 | no_xadd: /* Legacy 386 processor */ | |
152 | local_irq_save(flags); | |
153 | __i = local_read(l); | |
154 | local_set(l, i + __i); | |
155 | local_irq_restore(flags); | |
156 | return i + __i; | |
157 | #endif | |
158 | } | |
159 | ||
160 | static inline long local_sub_return(long i, local_t *l) | |
161 | { | |
01c57fb6 | 162 | return local_add_return(-i, l); |
5638f993 HH |
163 | } |
164 | ||
01c57fb6 HH |
165 | #define local_inc_return(l) (local_add_return(1, l)) |
166 | #define local_dec_return(l) (local_sub_return(1, l)) | |
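
/*
 * Note on the xadd path above: after "xadd %0,%1" the register operand
 * holds the counter's previous value, so returning i + __i yields the
 * post-add value. Usage sketch (hypothetical name), handing out per-cpu
 * sequence numbers with preemption disabled:
 *
 *	static DEFINE_PER_CPU(local_t, seq);
 *
 *	long next = local_inc_return(&__get_cpu_var(seq));
 */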

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)			\
({							\
	long c, old;					\
	c = local_read(l);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = local_cmpxchg((l), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
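
/*
 * Usage sketch (hypothetical name): local_inc_not_zero is the usual
 * "take a reference only while the object is still live" idiom, here on
 * a CPU-local counter with preemption disabled:
 *
 *	if (!local_inc_not_zero(&__get_cpu_var(buf_refs)))
 *		return -ENODEV;		counter had already hit zero
 */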
197 | ||
198 | /* On x86_32, these are no better than the atomic variants. | |
199 | * On x86-64 these are better than the atomic variants on SMP kernels | |
200 | * because they dont use a lock prefix. | |
201 | */ | |
202 | #define __local_inc(l) local_inc(l) | |
203 | #define __local_dec(l) local_dec(l) | |
01c57fb6 HH |
204 | #define __local_add(i, l) local_add((i), (l)) |
205 | #define __local_sub(i, l) local_sub((i), (l)) | |
5638f993 HH |
206 | |
207 | /* Use these for per-cpu local_t variables: on some archs they are | |
208 | * much more efficient than these naive implementations. Note they take | |
209 | * a variable, not an address. | |
210 | * | |
211 | * X86_64: This could be done better if we moved the per cpu data directly | |
212 | * after GS. | |
213 | */ | |
214 | ||
215 | /* Need to disable preemption for the cpu local counters otherwise we could | |
216 | still access a variable of a previous CPU in a non atomic way. */ | |
217 | #define cpu_local_wrap_v(l) \ | |
218 | ({ local_t res__; \ | |
219 | preempt_disable(); \ | |
220 | res__ = (l); \ | |
221 | preempt_enable(); \ | |
222 | res__; }) | |
223 | #define cpu_local_wrap(l) \ | |
224 | ({ preempt_disable(); \ | |
225 | l; \ | |
226 | preempt_enable(); }) \ | |
227 | ||
228 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | |
229 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | |
230 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | |
231 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | |
232 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | |
233 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | |
234 | ||
235 | #define __cpu_local_inc(l) cpu_local_inc(l) | |
236 | #define __cpu_local_dec(l) cpu_local_dec(l) | |
237 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | |
238 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | |
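
/*
 * Usage sketch (hypothetical name): the cpu_local_* wrappers take the
 * per-cpu variable itself, not its address, and bracket the access with
 * preempt_disable()/preempt_enable() so callers need not:
 *
 *	static DEFINE_PER_CPU(local_t, softirq_events);
 *
 *	cpu_local_inc(softirq_events);		one event on this CPU
 *	cpu_local_add(3, softirq_events);	a batch of three
 */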
239 | ||
240 | #endif /* _ARCH_LOCAL_H */ |