#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
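/*
 * Illustrative sketch (not part of the original header): accessing a
 * per-cpu variable with preemption disabled for the duration of the
 * access.  "softirq_work_list" and "work" are made-up names used only
 * for demonstration.
 *
 *	DEFINE_PER_CPU(struct list_head, softirq_work_list);
 *
 *	struct list_head *head = &get_cpu_var(softirq_work_list);
 *	list_add(&work->entry, head);
 *	put_cpu_var(softirq_work_list);
 *
 * get_cpu_ptr()/put_cpu_ptr() provide the same pattern for pointers
 * returned by the dynamic percpu allocator.
 */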

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
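/*
 * Illustrative sketch (not part of the original header): an arch
 * setting up the first chunk with pcpu_embed_first_chunk().  The
 * callback names "my_pcpu_alloc" and "my_pcpu_free" are hypothetical;
 * a real implementation passes its own bootmem-backed helpers, and may
 * pass a cpu_distance_fn instead of NULL when it groups CPUs by node.
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE,
 *				    PAGE_SIZE, NULL,
 *				    my_pcpu_alloc, my_pcpu_free);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas (err=%d)", rc);
 */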

/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
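/*
 * Illustrative sketch (not part of the original header): summing a
 * dynamically allocated percpu counter across all possible CPUs.
 * "counters" is a hypothetical pointer obtained from alloc_percpu().
 *
 *	unsigned long __percpu *counters;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */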

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

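/*
 * Illustrative sketch (not part of the original header): allocating,
 * updating and freeing a dynamic percpu object.  "struct my_stats" is
 * a made-up type used only for demonstration.
 *
 *	struct my_stats { unsigned long rx, tx; };
 *	struct my_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->rx);
 *	...
 *	free_percpu(stats);
 */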
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of char, int or long.  percpu_read() evaluates to an
 * lvalue and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
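/*
 * Illustrative sketch (not part of the original header): the non-lvalue
 * accessors operating on a static percpu variable.  "nr_events" and
 * "threshold" are hypothetical names used only for demonstration.
 *
 *	DEFINE_PER_CPU(int, nr_events);
 *
 *	percpu_add(nr_events, 1);
 *	if (percpu_read(nr_events) > threshold)
 *		percpu_write(nr_events, 0);
 */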

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__);break;			\
	case 2: stem##2(variable, __VA_ARGS__);break;			\
	case 4: stem##4(variable, __VA_ARGS__);break;			\
	case 8: stem##8(variable, __VA_ARGS__);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
} while (0)
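/*
 * Illustrative expansion (not part of the original header): for an
 * "int" percpu variable x, this_cpu_read(x) goes through
 * __pcpu_size_call_return(this_cpu_read_, x) and, since sizeof(x) == 4,
 * ends up calling this_cpu_read_4(x) -- either an arch-provided
 * single-instruction version or the generic fallback defined below.
 * An arch therefore only needs to define the sized helpers it can do
 * better than the generic code.
 */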

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

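/*
 * Illustrative sketch (not part of the original header): a per-cpu
 * statistics counter updated from contexts that may be preempted.
 * "nr_dentry" and "nr_batched" are hypothetical names; the operation
 * itself takes care of preemption (or uses a single instruction on
 * archs that provide one).
 *
 *	DEFINE_PER_CPU(unsigned long, nr_dentry);
 *
 *	this_cpu_inc(nr_dentry);
 *	this_cpu_add(nr_dentry, nr_batched);
 */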
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
# define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
# define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
# define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
# define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
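/*
 * Illustrative sketch (not part of the original header): generating a
 * per-cpu sequence number with the read-modify-write-and-return
 * variants.  "pcpu_seq" is a hypothetical DEFINE_PER_CPU counter.
 *
 *	DEFINE_PER_CPU(unsigned int, pcpu_seq);
 *
 *	unsigned int id = this_cpu_inc_return(pcpu_seq);
 */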

#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
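/*
 * Illustrative sketch (not part of the original header): claiming a
 * per-cpu slot with this_cpu_cmpxchg().  "pcpu_owner" and "new_owner"
 * are hypothetical names; a value of 0 means the slot is free.
 *
 *	DEFINE_PER_CPU(unsigned long, pcpu_owner);
 *
 *	old = this_cpu_cmpxchg(pcpu_owner, 0, new_owner);
 *	if (old == 0)
 *		...	the slot was free and is now owned by new_owner
 */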

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
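/*
 * Illustrative sketch (not part of the original header): using the
 * __this_cpu variants from a context that already cannot be preempted,
 * e.g. under preempt_disable() or inside an interrupt handler.
 * "rx_bytes", "rx_packets" and "len" are hypothetical names used only
 * for demonstration.
 *
 *	preempt_disable();
 *	__this_cpu_add(rx_bytes, len);
 *	__this_cpu_inc(rx_packets);
 *	preempt_enable();
 */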
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
# define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
# define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
# define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
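/*
 * Illustrative sketch (not part of the original header): a counter
 * modified both from process context and from an interrupt handler on
 * the same CPU.  The irqsafe variant keeps the RMW atomic against the
 * local interrupt.  "nr_irq_events" is a hypothetical DEFINE_PER_CPU
 * counter used only for demonstration.
 *
 *	irqsafe_cpu_inc(nr_irq_events);
 */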
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
# define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
# define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
# define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
# define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)		irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)		irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
# define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
# define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
# define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
# define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
# define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
# define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
# define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
# define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
# define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
# define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
# define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
# define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg
# ifndef irqsafe_cpu_cmpxchg_1
# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_2
# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_4
# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_8
# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif

#endif /* __LINUX_PERCPU_H */