#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
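
/*
 * Illustrative use from assembly (hypothetical operands, not part of
 * this header): on a 64-bit SMP build
 *
 *	movq	PER_CPU_VAR(kernel_stack), %rsp
 *
 * assembles to "movq %gs:kernel_stack, %rsp", while on a UP build the
 * same line becomes a plain "movq kernel_stack, %rsp".
 */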

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
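
/*
 * Sketch of what this buys (assumed variable name): a pointer lookup
 * such as
 *
 *	struct foo *p = raw_cpu_ptr(&foo_percpu);
 *
 * compiles down to a single "add %gs:this_cpu_off, %rax"-style
 * instruction, folding the per-cpu base into the pointer without first
 * loading the offset into a scratch register.
 */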
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%P" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif
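
/*
 * Illustrative use (mirroring how other x86 headers are believed to use
 * this): a per-cpu variable referenced before the per-cpu areas exist
 * is declared with
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *
 * and early boot/linker code then refers to init_per_cpu__gdt_page,
 * the image copy laid out relative to __per_cpu_load, instead of the
 * runtime per-cpu address.
 */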

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
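
/*
 * Minimal sketch of the size dispatch above (assumed variable name):
 *
 *	this_cpu_write(cpu_number, 3);
 *
 * picks the 4-byte arm for an int and emits a single
 * "movl $3, %gs:cpu_number" (%fs: on 32-bit).  The dead "if (0)" block
 * exists only so the compiler type-checks val against var.
 */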

/*
 * Generate a percpu add-to-memory instruction, and optimize it to
 * inc/dec when the constant 1 or -1 is being added.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
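
/*
 * Example of the inc/dec shortcut (hypothetical counter name):
 * this_cpu_inc(nr_events) reaches percpu_add_op() with val == 1 and is
 * emitted as "incq %gs:nr_events" rather than "addq $1, %gs:nr_events".
 */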

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})
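
/*
 * The third argument is the full asm input constraint, so the readers
 * below can choose how much freedom gcc gets: e.g. (assumed name)
 * this_cpu_read(cpu_number) passes "m"(cpu_number) and turns into a
 * fresh "mov %gs:cpu_number, %eax"-style load at every call site.
 */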

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)					\
({									\
	typeof(var) paro_ret__ = val;					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("xaddb %0, "__percpu_arg(1)				\
			    : "+q" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 2:								\
		asm("xaddw %0, "__percpu_arg(1)				\
			    : "+r" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 4:								\
		asm("xaddl %0, "__percpu_arg(1)				\
			    : "+r" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 8:								\
		asm("xaddq %0, "__percpu_arg(1)				\
			    : "+re" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	paro_ret__ += val;						\
	paro_ret__;							\
})
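
/*
 * Note on the trailing "paro_ret__ += val": xadd leaves the *old* value
 * of the memory operand in the register, so adding val once more gives
 * the post-add value that, e.g., this_cpu_add_return(x, 5) is expected
 * to return (x being a hypothetical per-cpu counter).
 */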

/*
 * xchg is implemented here with cmpxchg and no lock prefix: a real xchg
 * with a memory operand always carries an implied lock prefix, which is
 * expensive and keeps the processor from prefetching the cacheline.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "q" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})
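
/*
 * Rough shape of the generated code (4-byte case): load the current
 * value into %eax, then loop on "cmpxchgl new, %gs:var" until nothing
 * (e.g. an interrupt on this CPU) changed the slot in between.  So
 * this_cpu_xchg(state, 0) on a hypothetical per-cpu "state" stores 0
 * and returns the previous value.
 */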

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)			\
({								\
	typeof(var) pco_ret__;					\
	typeof(var) pco_old__ = (oval);				\
	typeof(var) pco_new__ = (nval);				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("cmpxchgb %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "q" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 2:							\
		asm("cmpxchgw %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 4:							\
		asm("cmpxchgl %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 8:							\
		asm("cmpxchgq %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	pco_ret__;						\
})
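
/*
 * Usage sketch (hypothetical per-cpu "seq"):
 *
 *	old = this_cpu_cmpxchg(seq, cur, cur + 1);
 *
 * The returned value is the previous contents of the slot, so comparing
 * it with "cur" tells the caller whether the swap actually happened.
 */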

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
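
/*
 * The only difference is the asm input constraint: the "m"(pcp) used by
 * the readers below forces a reload from %gs:var on every this_cpu_read(),
 * while the "p" (&(var)) here describes only the address, letting gcc
 * cache/CSE the result -- which is what get_current()'s read of the
 * per-cpu current_task is understood to rely on.
 */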

#define raw_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
		    : "b" (__n1), "c" (__n2), "a" (__o1));		\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
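
/*
 * Usage sketch (following the slab allocator's believed use of the
 * generic wrapper): the two members must be adjacent and naturally
 * aligned as a pair, e.g.
 *
 *	this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				old_list, old_tid, new_list, new_tid);
 *
 * which swaps the local CPU's freelist pointer and transaction id in a
 * single cmpxchg8b (or cmpxchg16b on 64-bit, below).
 */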

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", (pcp), "m"(pcp))
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
 * is not supported on early AMD64 processors so we must be able to emulate
 * it in software.  The address used in the cmpxchg16 instruction must be
 * aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})

static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))

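/*
 * Example (assumed to mirror the cpufeature code): this_cpu_has(bit) is
 * built on x86_this_cpu_test_bit(bit, &cpu_info.x86_capability); with a
 * compile-time-constant bit it resolves to the _constant_ variant above
 * and becomes a single per-cpu load plus a mask test.
 */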

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
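
/*
 * Illustrative use (mirroring the APIC code, assumed here): the CPU ->
 * APIC id table is created with
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * and early_per_cpu(x86_cpu_to_apicid, cpu) then reads the static
 * _early_map[] until the real per-cpu areas are set up; once _early_ptr
 * is cleared, the same expression falls through to per_cpu().
 */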

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */