/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#else
#define __percpu_seg		fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
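/*
 * Illustrative sketch (not part of this header; "per_cpu_var" is a
 * hypothetical percpu variable): arch_raw_cpu_ptr() folds the
 * this_cpu_off addition into a single "add", so code such as
 *
 *	struct foo *q = raw_cpu_ptr(&per_cpu_var);
 *
 * compiles to roughly "mov $per_cpu_var, %rax; add %gs:this_cpu_off,
 * %rax" instead of first reading the offset into a temporary register.
 */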
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x
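/*
 * Illustrative expansion (assuming CONFIG_SMP on 64-bit, where
 * __percpu_seg is gs): __percpu_arg([var]) becomes the string
 * "%%gs:%[var]", i.e. a segment-prefixed operand reference inside an
 * asm template; without SMP it is just "%[var]".
 */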

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)

#define percpu_to_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
	if (0) {							\
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
	    : [var] "+m" (_var)						\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)
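/*
 * Illustrative expansion (my sketch, not generated output): for a
 * hypothetical "DEFINE_PER_CPU(u32, cnt)", percpu_to_op(4, volatile,
 * "mov", cnt, 5) emits roughly
 *
 *	asm volatile("movl %[val], %%gs:%[var]"
 *		     : [var] "+m" (cnt)
 *		     : [val] "ri" ((u32)5));
 *
 * The dead "if (0)" assignment exists only so the compiler type-checks
 * _val against _var; it is optimized away entirely.
 */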

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
	    : [var] "+m" (_var));					\
})

/*
 * Generate a percpu add-to-memory instruction, optimizing it to
 * inc/dec when the value added is a constant 1 or -1.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		typeof(var) pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_to_op(size, qual, "add", var, val);		\
} while (0)
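/*
 * Illustrative sketch (assuming CONFIG_X86_64 and a hypothetical
 * percpu "u64 hits"):
 *
 *	this_cpu_add(hits, 1);	// roughly "incq %gs:hits"
 *	this_cpu_add(hits, 2);	// roughly "addq $2, %gs:hits"
 *
 * Since pao_ID__ is a compile-time constant, the unused branches are
 * discarded by the compiler; only one instruction is generated.
 */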

#define percpu_from_op(size, qual, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (_var));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_stable_op(size, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "p" (&(_var)));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
				     __percpu_arg([var]))		\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (_var)					\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})
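/*
 * Illustrative note (my sketch): "xadd" writes the old value of the
 * percpu variable back into the register operand, so the new value is
 * recomputed in C as paro_tmp__ + _val. E.g. for a hypothetical percpu
 * "int nesting":
 *
 *	int n = this_cpu_add_return(nesting, 1);   // xaddl, then add
 */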

/*
 * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
 * The xchg instruction itself is expensive due to its implied lock
 * prefix, and the processor cannot prefetch cachelines when xchg is
 * used.
 */
#define percpu_xchg_op(size, qual, _var, _nval)				\
({									\
	__pcpu_type_##size pxo_old__;					\
	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]),		\
				    "%[oval]")				\
		  "\n1:\t"						\
		  __pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  "\n\tjnz 1b"						\
		  : [oval] "=&a" (pxo_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pxo_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pxo_old__;			\
})

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pco_old__;			\
})
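/*
 * Illustrative usage (my sketch; "state", IDLE and BUSY are
 * hypothetical):
 *
 *	old = this_cpu_cmpxchg(state, IDLE, BUSY);
 *	if (old == IDLE)
 *		... // we won the (cpu-local) race
 *
 * No lock prefix is needed: the operation only has to be atomic with
 * respect to interrupts on the local CPU, not against other CPUs.
 */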

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus. The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
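/*
 * Illustrative difference (my sketch, using the percpu current_task
 * pointer as an example):
 *
 *	this_cpu_read(current_task);		// reloaded on every use
 *	this_cpu_read_stable(current_task);	// may be reused/cached
 *
 * percpu_stable_op() uses a non-volatile asm whose only input is the
 * constant address ("p" constraint), so the compiler may CSE repeated
 * reads within a function.
 */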

#define raw_cpu_read_1(pcp)		percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__ = raw_cpu_read(var);			\
	raw_cpu_write(var, (nval));					\
	pxo_ret__;							\
})

#define raw_cpu_xchg_1(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)			\
		     CC_SET(z)						\
		     : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
		     : "b" (__n1), "c" (__n2));				\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
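/*
 * Illustrative usage (my sketch; "head"/"tail" are hypothetical
 * adjacent percpu u32 variables):
 *
 *	if (this_cpu_cmpxchg_double(head, tail, oh, ot, nh, nt))
 *		... // both words updated atomically via cmpxchg8b
 *
 * cmpxchg8b compares edx:eax with the 8-byte memory operand and, on a
 * match, stores ecx:ebx; that is why the constraints pin the old pair
 * to "a"/"d" and the new pair to "b"/"c".
 */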

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction. The
 * instruction is not supported on early AMD64 processors, so we must
 * be able to emulate it in software. The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1"
			CC_SET(c)
			: CC_OUT(c) (oldbit)
			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
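/*
 * Illustrative usage (my sketch; "cpu_flags" is a hypothetical percpu
 * unsigned long bitmap):
 *
 *	if (x86_this_cpu_test_bit(3, &cpu_flags))
 *		...
 *
 * A constant bit number compiles to a plain read plus mask; a variable
 * bit number uses the "btl" instruction instead.
 */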

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
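/*
 * Illustrative usage (my sketch; x86 uses this pattern for e.g. the
 * per-cpu APIC id): early boot code accesses the static _early_map
 * until the real per_cpu areas exist, after which early_per_cpu()
 * transparently resolves to the normal per_cpu() access:
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *	...
 *	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 */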

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */