#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
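
/*
 * Hedged usage sketch (not part of the original header): PER_CPU_VAR()
 * is meant for assembly code.  "kernel_stack" below is only an example
 * per-cpu symbol assumed to be defined elsewhere.
 *
 *	movq	PER_CPU_VAR(kernel_stack), %rsp
 */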

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%P" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
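
/*
 * Hedged usage sketch (illustrative only): "gdt_page" stands in for a
 * per-cpu variable that also needs a matching entry in the linker
 * script; the name is an assumption, not a requirement of this header.
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *	... = &init_per_cpu_var(gdt_page);
 */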

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

/*
 * Generate a percpu add to memory instruction and optimize code
 * if one is added or subtracted.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
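
/*
 * Hedged sketch of what percpu_add_op() expands to (illustrative;
 * "nr_foo" is a made-up per-cpu int, and %gs becomes %fs on 32-bit):
 *
 *	percpu_add_op(nr_foo, 1);	->  incl %gs:nr_foo
 *	percpu_add_op(nr_foo, -1);	->  decl %gs:nr_foo
 *	percpu_add_op(nr_foo, n);	->  addl <reg/imm>, %gs:nr_foo
 */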

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)
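
/*
 * Hedged usage sketch of the accessors above: this_cpu_off is declared
 * later in this file; "some_percpu_counter" is a made-up name.
 *
 *	unsigned long off = percpu_read(this_cpu_off);
 *	percpu_write(this_cpu_off, off);
 *	percpu_inc(some_percpu_counter);
 */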

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
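
/*
 * Hedged usage sketch ("pending_flags" is a hypothetical per-cpu word;
 * the caller must already have preemption disabled):
 *
 *	if (x86_test_and_clear_bit_percpu(0, pending_flags))
 *		do_pending_work();
 */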

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
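
/*
 * Hedged usage sketch, modelled on the APIC ID map; the names are
 * examples rather than something this header defines:
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *	...
 *	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 */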

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */