#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
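
/*
 * Illustrative use (names here are hypothetical, not part of this
 * header): claiming a pending-work flag.  The returned value is the
 * previous contents, and the implicit lock makes the swap atomic on
 * SMP without an explicit lock prefix.
 *
 *	static u32 pending;
 *
 *	static bool claim_pending(void)
 *	{
 *		return xchg(&pending, 0) != 0;
 *	}
 */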
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see the coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
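
/*
 * Illustrative use (hypothetical names): publishing a 64-bit value so
 * that concurrent 32-bit readers never observe a torn half-update;
 * the cmpxchg8b loop above stores all 8 bytes in one atomic shot.
 *
 *	static volatile u64 last_event_ns;
 *
 *	static void record_event(u64 now_ns)
 *	{
 *		set_64bit(&last_event_ns, now_ns);
 *	}
 */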
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
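
/*
 * Illustrative use (hypothetical names): the classic compare-and-swap
 * retry loop built on the semantics documented above; success is
 * detected by the returned value matching the expected old value.
 * Here, a 32-bit counter is incremented but saturates at UINT_MAX.
 *
 *	static u32 hits;
 *
 *	static void hit_saturating(void)
 *	{
 *		u32 old;
 *
 *		do {
 *			old = hits;
 *			if (old == UINT_MAX)
 *				return;
 *		} while (cmpxchg(&hits, old, old + 1) != old);
 *	}
 */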
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
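
/*
 * Illustrative use (hypothetical names): bumping a 64-bit sequence
 * number with a retry loop.  Per the note at the top of this header,
 * code that may run on a CPU without CMPXCHG8B must test the feature
 * in boot_cpu_data (or rely on the cmpxchg8b_emu path below).
 *
 *	static u64 seq;
 *
 *	static u64 bump_seq(void)
 *	{
 *		u64 old, prev;
 *
 *		do {
 *			old = seq;
 *			prev = cmpxchg64(&seq, old, old + 1);
 *		} while (prev != old);
 *		return old + 1;
 *	}
 */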
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be
 * necessary to simulate the cmpxchg on the 80386 CPU.  For that
 * purpose we define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may
 * be necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif
#define cmpxchg8b(ptr, o1, o2, n1, n2)				\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"	\
		     : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)	\
		     : "a" (__old1), "d"(__old2),		\
		       "b" (__new1), "c" (__new2)		\
		     : "memory");				\
	__ret; })

#define cmpxchg8b_local(ptr, o1, o2, n1, n2)			\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile("cmpxchg8b %2; setz %1"			\
		     : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)	\
		     : "a" (__old1), "d"(__old2),		\
		       "b" (__new1), "c" (__new2)		\
		     : "memory");				\
	__ret; })

#define cmpxchg_double(ptr, o1, o2, n1, n2)			\
({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
	VM_BUG_ON((unsigned long)(ptr) % 8);			\
	cmpxchg8b((ptr), (o1), (o2), (n1), (n2));		\
})

#define cmpxchg_double_local(ptr, o1, o2, n1, n2)		\
({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
	VM_BUG_ON((unsigned long)(ptr) % 8);			\
	cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));		\
})

#define system_has_cmpxchg_double() cpu_has_cx8
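
/*
 * Illustrative use (hypothetical names): atomically replacing an
 * adjacent pointer + generation pair, e.g. a lock-free list head
 * tagged against ABA.  The pair must be 8-byte aligned with each half
 * 4 bytes wide, as the checks in cmpxchg_double() enforce; the macro
 * returns nonzero on success (the setz result).  Callers should check
 * system_has_cmpxchg_double() first.
 *
 *	static struct {
 *		void *head;
 *		unsigned int gen;
 *	} __aligned(8) list;
 *
 *	static int try_set_head(void *old_head, unsigned int old_gen,
 *				void *new_head)
 *	{
 *		return cmpxchg_double(&list.head, old_head, old_gen,
 *				      new_head, old_gen + 1);
 *	}
 */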
#endif /* _ASM_X86_CMPXCHG_32_H */