#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h>	/* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
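
/*
 * Illustrative sketch (not in the original source): on a 32-bit build,
 * an 8-byte operand matches none of the case labels above, so only the
 * default branch survives dead-code elimination and the reference to
 * the matching *_wrong_size() function surfaces the mistake at compile
 * time (or, failing that, at link time):
 *
 *	u64 val = 0;
 *	xchg(&val, 1ULL);	resolves to __xchg_wrong_size() on 32-bit
 */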
35 | ||
31a8394e JF |
36 | /* |
37 | * An exchange-type operation, which takes a value and a pointer, and | |
38 | * returns a the old value. | |
39 | */ | |
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
69 | ||
e9826380 JF |
70 | /* |
71 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. | |
72 | * Since this is generally used to protect other memory information, we | |
73 | * use "asm volatile" and "memory" clobbers to prevent gcc from moving | |
74 | * information around. | |
75 | */ | |
31a8394e | 76 | #define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "") |
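
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * xchg() returns the previous value, so it can implement a one-shot
 * test-and-set flag with no explicit lock.
 */
static inline int __example_test_and_set(unsigned long *flag)
{
	/* Atomically store 1 and fetch the old value. */
	return xchg(flag, 1UL) != 0;
}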

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
130 | ||
131 | #define __cmpxchg(ptr, old, new, size) \ | |
132 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) | |
133 | ||
134 | #define __sync_cmpxchg(ptr, old, new, size) \ | |
135 | __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") | |
136 | ||
137 | #define __cmpxchg_local(ptr, old, new, size) \ | |
138 | __raw_cmpxchg((ptr), (old), (new), (size), "") | |
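
/*
 * Note on the three lock flavours: LOCK_PREFIX comes from
 * <asm/alternative.h> and is patched out at boot on uniprocessor
 * kernels; "lock; " is emitted unconditionally; "" omits the lock
 * entirely, for data that no other CPU can touch concurrently.
 */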
139 | ||
96a388de TG |
140 | #ifdef CONFIG_X86_32 |
141 | # include "cmpxchg_32.h" | |
142 | #else | |
143 | # include "cmpxchg_64.h" | |
144 | #endif | |

#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
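
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * the canonical cmpxchg() retry loop.  Success is detected by comparing
 * the returned value with the expected old value.
 */
static inline void __example_add_nolock(unsigned int *ptr, unsigned int inc)
{
	unsigned int cur = *ptr, old;

	for (;;) {
		old = cmpxchg(ptr, cur, cur + inc);
		if (old == cur)
			break;		/* nobody raced with us: done */
		cur = old;		/* lost the race: retry with new value */
	}
}
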
#endif

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
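
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * xadd() is a fetch-and-add, returning the value *ptr held before the
 * addition; x86 ticket spinlocks are built on this primitive.
 */
static inline unsigned int __example_fetch_and_inc(unsigned int *ctr)
{
	/* Atomically do *ctr += 1 and return the pre-increment value. */
	return xadd(ctr, 1);
}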
169 | ||
3d94ae0c JF |
170 | #define __add(ptr, inc, lock) \ |
171 | ({ \ | |
172 | __typeof__ (*(ptr)) __ret = (inc); \ | |
173 | switch (sizeof(*(ptr))) { \ | |
174 | case __X86_CASE_B: \ | |
175 | asm volatile (lock "addb %b1, %0\n" \ | |
176 | : "+m" (*(ptr)) : "ri" (inc) \ | |
177 | : "memory", "cc"); \ | |
178 | break; \ | |
179 | case __X86_CASE_W: \ | |
180 | asm volatile (lock "addw %w1, %0\n" \ | |
181 | : "+m" (*(ptr)) : "ri" (inc) \ | |
182 | : "memory", "cc"); \ | |
183 | break; \ | |
184 | case __X86_CASE_L: \ | |
185 | asm volatile (lock "addl %1, %0\n" \ | |
186 | : "+m" (*(ptr)) : "ri" (inc) \ | |
187 | : "memory", "cc"); \ | |
188 | break; \ | |
189 | case __X86_CASE_Q: \ | |
190 | asm volatile (lock "addq %1, %0\n" \ | |
191 | : "+m" (*(ptr)) : "ri" (inc) \ | |
192 | : "memory", "cc"); \ | |
193 | break; \ | |
194 | default: \ | |
195 | __add_wrong_size(); \ | |
196 | } \ | |
197 | __ret; \ | |
198 | }) | |
199 | ||
200 | /* | |
201 | * add_*() adds "inc" to "*ptr" | |
202 | * | |
203 | * __add() takes a lock prefix | |
204 | * add_smp() is locked when multiple CPUs are online | |
205 | * add_sync() is always locked | |
206 | */ | |
207 | #define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX) | |
208 | #define add_sync(ptr, inc) __add((ptr), (inc), "lock; ") | |
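
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * add_smp() when the addition itself must be atomic but the previous
 * value is not needed.  Note that __add() yields "inc", not the old
 * value, so use xadd() when the old value matters.
 */
static inline void __example_stat_add(unsigned long *stat, unsigned long n)
{
	add_smp(stat, n);	/* LOCK'd on SMP kernels, plain add on UP */
}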
209 | ||
e9826380 | 210 | #endif /* ASM_X86_CMPXCHG_H */ |