#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/* On x86-64, an aligned 64-bit store is atomic, so a plain assignment suffices. */
static inline void set_64bit(volatile u64 *ptr, u64 val)
{
	*ptr = val;
}

extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 8:								\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile("xchgq %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*(ptr)))
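
/*
 * Usage sketch (illustrative only, not part of this header): xchg() can
 * hand off a pointer atomically, e.g. to claim a pending work item.  The
 * "pending" variable and claim_pending() helper below are hypothetical.
 *
 *	static void *pending;
 *
 *	static void *claim_pending(void)
 *	{
 *		return xchg(&pending, NULL);	// atomically take and clear
 *	}
 */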

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 8:								\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
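
/*
 * Usage sketch (illustrative only, not part of this header): the
 * canonical cmpxchg() retry loop.  The "counter" variable and
 * counter_add() helper are hypothetical.
 *
 *	static unsigned long counter;
 *
 *	static void counter_add(unsigned long n)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = counter;
 *			new = old + n;
 *		} while (cmpxchg(&counter, old, new) != old);
 *	}
 */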

#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})

#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})

/*
 * CMPXCHG16B compares RDX:RAX with the 16-byte memory operand and, if
 * equal, stores RCX:RBX there; ZF reports success.  The operand must be
 * 16-byte aligned.  __junk absorbs the high half of the old value that
 * the instruction writes back to RDX.
 */
#define cmpxchg16b(ptr, o1, o2, n1, n2)					\
({									\
	char __ret;							\
	__typeof__(o2) __junk;						\
	__typeof__(*(ptr)) __old1 = (o1);				\
	__typeof__(o2) __old2 = (o2);					\
	__typeof__(*(ptr)) __new1 = (n1);				\
	__typeof__(o2) __new2 = (n2);					\
	asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1"		\
		     : "=d"(__junk), "=a"(__ret), "+m" (*ptr)		\
		     : "b"(__new1), "c"(__new2),			\
		       "a"(__old1), "d"(__old2));			\
	__ret; })


/* Non-LOCKed variant for data that is only touched by the local CPU. */
#define cmpxchg16b_local(ptr, o1, o2, n1, n2)				\
({									\
	char __ret;							\
	__typeof__(o2) __junk;						\
	__typeof__(*(ptr)) __old1 = (o1);				\
	__typeof__(o2) __old2 = (o2);					\
	__typeof__(*(ptr)) __new1 = (n1);				\
	__typeof__(o2) __new2 = (n2);					\
	asm volatile("cmpxchg16b %2;setz %1"				\
		     : "=d"(__junk), "=a"(__ret), "+m" (*ptr)		\
		     : "b"(__new1), "c"(__new2),			\
		       "a"(__old1), "d"(__old2));			\
	__ret; })

#define cmpxchg_double(ptr, o1, o2, n1, n2)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	VM_BUG_ON((unsigned long)(ptr) % 16);				\
	cmpxchg16b((ptr), (o1), (o2), (n1), (n2));			\
})

#define cmpxchg_double_local(ptr, o1, o2, n1, n2)			\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	VM_BUG_ON((unsigned long)(ptr) % 16);				\
	cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2));		\
})
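
/*
 * Usage sketch (illustrative only, not part of this header):
 * cmpxchg_double() updates an adjacent pointer/counter pair in one shot,
 * a common ABA countermeasure.  The "freelist" structure and
 * freelist_pop() helper are hypothetical; note the required 16-byte
 * alignment of the pair.
 *
 *	struct freelist {
 *		void *head;
 *		unsigned long tid;
 *	} __attribute__((aligned(16)));
 *
 *	// Returns 1 on success, 0 if another CPU changed the pair.
 *	static inline int freelist_pop(struct freelist *f, void *old_head,
 *				       unsigned long old_tid, void *new_head)
 *	{
 *		return cmpxchg_double(&f->head, old_head, old_tid,
 *				      new_head, old_tid + 1);
 *	}
 */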

#define system_has_cmpxchg_double() cpu_has_cx16

#endif /* _ASM_X86_CMPXCHG_64_H */