#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

#define put_cpu_var(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
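
/*
 * Usage sketch (illustrative only; "my_counter" is a hypothetical
 * DEFINE_PER_CPU variable, not something declared in this header):
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 *
 * get_cpu_var() disables preemption and yields an lvalue for this
 * CPU's instance; every get_cpu_var() must be paired with a
 * put_cpu_var() to re-enable preemption.
 */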

#ifdef CONFIG_SMP

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch
 * is manually allocating and mapping it for faster access (as a part
 * of large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
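
/*
 * Illustrative sketch only ("p" is a hypothetical pointer; nothing
 * here is declared by this header):
 *
 *	int *p = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	int cpu = get_cpu();
 *	(*per_cpu_ptr(p, cpu))++;
 *	put_cpu();
 *
 * get_cpu() pins the task to the current CPU so the non-atomic
 * increment cannot race with the same code running after a migration.
 */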

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	\
	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))

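/*
 * Lifecycle sketch (illustrative; "struct foo" and its "val" field are
 * hypothetical):
 *
 *	struct foo *fp = alloc_percpu(struct foo);
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(fp, cpu)->val = 0;
 *	...
 *	free_percpu(fp);
 *
 * alloc_percpu() hands back a typed pointer; each CPU's copy is
 * reached through per_cpu_ptr().
 */
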
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of a char, int or long.  percpu_read() evaluates to an
 * lvalue and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.  A usage sketch follows the
 * operations defined below.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
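
/*
 * Usage sketch (illustrative; "nr_events" is a hypothetical
 * DEFINE_PER_CPU variable and THRESHOLD a hypothetical constant):
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	percpu_add(nr_events, 1);
 *	if (percpu_read(nr_events) > THRESHOLD)
 *		...;
 *
 * Each call is preemption-safe on its own, but consecutive calls may
 * run on different CPUs if the task is migrated in between.
 */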

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
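
/*
 * For example, given a 4-byte variable x,
 * __pcpu_size_call_return(this_cpu_read_, x) reduces to
 * this_cpu_read_4(x) at compile time; an unsupported size becomes a
 * link error because __bad_size_call_parameter() is never defined.
 */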

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
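
/*
 * A sketch of option 2 above (hypothetical; arch_local_add_4() is not
 * a real helper): an arch header included before this point could do
 *
 *	#define this_cpu_add_4(pcp, val)  arch_local_add_4(&(pcp), val)
 *
 * and the 1, 2 and 8 byte cases would still fall back to
 * _this_cpu_generic_to_op() below.
 */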

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif
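
/*
 * Usage sketch (illustrative; "nr_packets" is a hypothetical
 * DEFINE_PER_CPU counter):
 *
 *	this_cpu_inc(nr_packets);
 *
 * This is safe from preemptible context: the generic fallback wraps
 * the RMW in preempt_disable()/preempt_enable(), and an arch may
 * implement it as a single instruction instead.
 */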

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
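
/*
 * Usage sketch (illustrative; "stats" is a hypothetical DEFINE_PER_CPU
 * counter and "n" an arbitrary value):
 *
 *	preempt_disable();
 *	__this_cpu_inc(stats);
 *	__this_cpu_add(stats, n);
 *	preempt_enable();
 *
 * The __ variants skip the preempt_disable()/preempt_enable() pair and
 * are only correct when the context already prevents migration.
 */
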
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from other processors (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
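
/*
 * Usage sketch (illustrative; "drops" is a hypothetical DEFINE_PER_CPU
 * counter updated from both interrupt and process context):
 *
 *	irqsafe_cpu_inc(drops);
 *
 * The generic fallback brackets the RMW with
 * local_irq_save()/local_irq_restore(), so a local interrupt cannot
 * slip in between the read and the write-back.
 */
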
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */