#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

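/*
 * On SMP kernels the "lock" prefix makes the read-modify-write
 * instructions below atomic with respect to the other processors.
 * On UP kernels it is dropped: a single instruction is already
 * atomic with respect to interrupts on the one CPU.
 */
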
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

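/*
 * A minimal declaration sketch (not part of this header); the counter
 * name is hypothetical:
 *
 *      static atomic_t nr_events = ATOMIC_INIT(0);
 *
 * Run-time (re)initialization of an already-visible counter should use
 * atomic_set() instead.
 */
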
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i) (((v)->counter) = (i))

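/*
 * Note that atomic_read() and atomic_set() compile to a plain load and
 * a plain store.  That is sufficient here because aligned 32-bit loads
 * and stores are atomic on x86; the LOCK prefix is only needed for
 * read-modify-write sequences.
 */
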
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

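/*
 * A note on the asm constraints used throughout this file: "=m" makes
 * v->counter an output operand, while the extra "m" (v->counter) input
 * tells gcc the old value is read as well, so the access is not
 * treated as a write-only store.  "ir" lets @i be either an immediate
 * or a register, both of which these instructions accept.
 */
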
/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "subl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "subl %2,%0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "decl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "decl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

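/*
 * Usage sketch for atomic_dec_and_test() (not part of this header);
 * struct foo and foo_release() are hypothetical:
 *
 *      static void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_test(&f->refcnt))
 *                      foo_release(f);
 *      }
 *
 * Exactly one caller observes the transition to zero, so only that
 * caller frees the object.
 */
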
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "incl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "addl %2,%0; sets %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value of @v.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int __i;
#ifdef CONFIG_M386
        if (unlikely(boot_cpu_data.x86 == 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        __asm__ __volatile__(
                LOCK "xaddl %0, %1;"
                :"=r" (i)
                :"m" (v->counter), "0" (i));
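        /* xaddl wrote the old value of v->counter back into i */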
        return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        local_irq_disable();
        __i = atomic_read(v);
        atomic_set(v, i + __i);
        local_irq_enable();
        return i + __i;
#endif
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
        return atomic_add_return(-i, v);
}

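/*
 * Usage sketch for atomic_add_return() (not part of this header); the
 * counter name is hypothetical:
 *
 *      static atomic_t next_id = ATOMIC_INIT(0);
 *
 *      int alloc_id(void)
 *      {
 *              return atomic_add_return(1, &next_id);
 *      }
 *
 * Each caller gets a distinct value even under concurrent use, because
 * xaddl performs the add and the fetch as one locked operation.
 */
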
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

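/*
 * atomic_cmpxchg() returns the value that was in @v before the attempt;
 * the swap happened iff that return value equals @old.  A sketch of the
 * usual retry loop (not part of this header), here computing an atomic
 * maximum:
 *
 *      static void atomic_max(atomic_t *v, int i)
 *      {
 *              int old = atomic_read(v);
 *              while (old < i) {
 *                      int seen = atomic_cmpxchg(v, old, i);
 *                      if (seen == old)
 *                              break;  /- swap succeeded -/
 *                      old = seen;     /- lost the race; retry -/
 *              }
 *      }
 *
 * (Nested comment markers are written /- -/ above to keep this block a
 * single valid C comment.)
 */
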
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)                                      \
({                                                                      \
        int c, old;                                                     \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                                \
        c != (u);                                                       \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

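/*
 * Usage sketch for atomic_inc_not_zero() (not part of this header);
 * the lookup-side "tryget" pattern, with hypothetical names:
 *
 *      struct foo *f = table_lookup(key);
 *      if (f && !atomic_inc_not_zero(&f->refcnt))
 *              f = NULL;       (object already dying; don't use it)
 *
 * Refusing to increment from zero keeps a lookup from resurrecting an
 * object whose last reference has already been dropped.
 */
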
#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

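/*
 * barrier() is only a compiler barrier.  Nothing stronger is needed
 * here because a LOCK-prefixed instruction already acts as a full
 * memory barrier on x86, so the CPU side of the ordering comes for
 * free with the atomic operation itself.
 */
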
#include <asm-generic/atomic.h>
#endif