/*
 * include/linux/percpu.h — per-CPU variable definitions and the
 * dynamic per-cpu allocator interface.
 */
1 | #ifndef __LINUX_PERCPU_H |
2 | #define __LINUX_PERCPU_H | |
7ff6f082 | 3 | |
0a3021f4 | 4 | #include <linux/preempt.h> |
1da177e4 LT |
5 | #include <linux/slab.h> /* For kmalloc() */ |
6 | #include <linux/smp.h> | |
7ff6f082 MP |
7 | #include <linux/cpumask.h> |
8 | ||
1da177e4 LT |
9 | #include <asm/percpu.h> |
10 | ||
#ifdef CONFIG_SMP
/*
 * Define a statically-allocated per-cpu variable.  The variable is
 * placed in the .data.percpu section so the linker groups all per-cpu
 * data together; each CPU accesses its own copy at a fixed offset.
 * The real symbol name is mangled to per_cpu__<name>.
 */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/*
 * As DEFINE_PER_CPU, but cacheline-aligned and placed in a separate
 * section so frequently-written variables do not false-share a
 * cacheline with unrelated per-cpu data.
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.shared_aligned"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp
#else
/* UP: a single plain variable suffices; no special section or alignment. */
#define DEFINE_PER_CPU(type, name) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif

/* Export the mangled per_cpu__<var> symbol so modules can use the variable. */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
30 | ||
1da177e4 LT |
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
/* Extra per-cpu space reserved for variables defined by loadable modules. */
#define PERCPU_MODULE_RESERVE	8192
#else
/* No module support: no reserve needed beyond the static per-cpu data. */
#define PERCPU_MODULE_RESERVE	0
#endif

/*
 * Size of the static per-cpu area (delimited by the linker symbols
 * __per_cpu_start/__per_cpu_end) plus the module reserve.
 * Architectures may override this by defining it before including us.
 */
#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */
42 | ||
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 *
 * Disables preemption so the caller stays on the same CPU while using
 * the variable; must be paired with put_cpu_var().
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
/* Re-enables preemption; pairs with get_cpu_var().  @var is unused. */
#define put_cpu_var(var) preempt_enable()
52 | ||
#ifdef CONFIG_SMP

/*
 * Header for dynamically allocated per-cpu data: ptrs[cpu] holds that
 * CPU's private copy of the object.  Declared with a single element;
 * presumably the allocator sizes the real array for all possible CPUs
 * (the classic trailing-array idiom) — verify against the allocator.
 */
struct percpu_data {
	void *ptrs[1];
};
58 | ||
/*
 * Hide a percpu_data pointer by bitwise-inverting its address, so that a
 * stray direct dereference of a dynamically-allocated per-cpu pointer
 * faults instead of silently reading the wrong copy; applying the macro
 * again recovers the original pointer.
 *
 * Fix: the replacement list is now fully parenthesized (outer parens and
 * parens around the inverted value) so the expansion cannot be re-parsed
 * when the macro appears inside a larger expression.
 */
#define __percpu_disguise(pdata) ((struct percpu_data *)(~(unsigned long)(pdata)))
/*
 * Use this to get to a cpu's version of the per-cpu object dynamically
 * allocated. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 *
 * Un-disguises @ptr to reach the percpu_data header, then returns
 * @cpu's copy cast back to the caller's pointer type.
 */
#define percpu_ptr(ptr, cpu)				\
({							\
	struct percpu_data *__p = __percpu_disguise(ptr);\
	(__typeof__(ptr))__p->ptrs[(cpu)];		\
})
70 | ||
/*
 * Dynamic per-cpu allocator interface (SMP); implementations live in a
 * separate translation unit — presumably the mm per-cpu allocator, not
 * visible here.  populate/depopulate allocate or free one CPU's copy;
 * the *_mask variants operate on every CPU set in @mask.
 */
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
extern void percpu_depopulate(void *__pdata, int cpu);
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask);
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);

#else /* CONFIG_SMP */
80 | ||
/* UP: there is only one copy, so the per-cpu pointer is the object itself. */
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

/* UP: no per-cpu copies were populated, so there is nothing to release. */
static inline void percpu_depopulate(void *__pdata, int cpu)
{
}

/* UP: no-op counterpart of the SMP mask-wide depopulate. */
static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}
1da177e4 | 90 | |
/* UP: the single copy always exists; just hand back its address. */
static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
				    int cpu)
{
	return percpu_ptr(__pdata, cpu);
}

/* UP: nothing to populate; report success regardless of @mask. */
static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
					 cpumask_t *mask)
{
	return 0;
}
102 | ||
/* UP: one zeroed allocation serves as the only "per-cpu" copy; @mask is
 * ignored.  kzalloc gives the same zero-initialized semantics callers
 * expect from the SMP allocator. */
static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	return kzalloc(size, gfp);
}

/* UP: free the single allocation made by __percpu_alloc_mask(). */
static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */
114 | ||
/* Convenience wrappers taking a cpumask_t by value rather than by pointer. */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

/* Allocate per-cpu copies for every currently online CPU. */
#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

/* Covers all possible CPUs so later hotplug events need no handling. */
#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
						  cpu_possible_map)
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr)	percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
1da177e4 LT |
131 | |
132 | #endif /* __LINUX_PERCPU_H */ |